summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/netapp/ontap/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
commita453ac31f3428614cceb99027f8efbdb9258a40b (patch)
treef61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/netapp/ontap/plugins
parentInitial commit. (diff)
downloadansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/netapp/ontap/plugins')
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py106
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py745
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py159
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py392
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py160
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py93
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py133
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py233
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py824
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py289
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py196
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py443
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py220
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py227
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py324
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py243
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py387
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py525
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py135
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py332
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py319
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py235
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py363
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py316
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py310
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py458
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py217
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py360
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py366
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py737
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py470
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py357
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py198
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py1787
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py613
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py311
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py273
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py339
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py394
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py357
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py419
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py333
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py276
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py757
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py188
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py287
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py185
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py170
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py222
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py210
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py216
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py407
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py315
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py236
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py434
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py332
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py190
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py599
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py147
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py360
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py292
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py211
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py221
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py363
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py284
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py385
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py287
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py335
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py317
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py457
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py255
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py450
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py144
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py617
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py305
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py455
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py233
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py292
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py895
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py837
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py333
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py500
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py154
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py133
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py417
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py255
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py540
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py162
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py245
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py353
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py259
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py712
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py274
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py2100
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py364
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py280
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py226
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py182
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py424
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py326
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py312
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py301
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py299
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py347
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py193
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py344
108 files changed, 39355 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py
new file mode 100644
index 00000000..725d3248
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+  - "Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire"
+'''
+
+ # Documentation fragment for ONTAP (na_ontap)
+ NA_ONTAP = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ - Two authentication methods are supported
+ - 1. basic authentication, using username and password,
+ - 2. SSL certificate authentication, using a ssl client cert file, and optionally a private key file.
+ - To use a certificate, the certificate must have been installed in the ONTAP cluster, and cert authentication must have been enabled.
+ type: str
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ aliases: [ pass ]
+ cert_filepath:
+ description:
+ - path to SSL client cert file (.pem).
+ - not supported with python 2.6.
+ type: str
+ version_added: 20.6.0
+ key_filepath:
+ description:
+ - path to SSL client key file.
+ type: str
+ version_added: 20.6.0
+ https:
+ description:
+ - Enable and disable https.
+ - Ignored when using REST as only https is supported.
+ - Ignored when using SSL certificate authentication as it requires SSL.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+      - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+      - Use the REST API if it is supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI.
+ - always -- will always use the REST API
+ - never -- will always use the ZAPI
+      - auto -- will try to use the REST API
+ default: auto
+ type: str
+ feature_flags:
+ description:
+ - Enable or disable a new feature.
+ - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility.
+ - Supported keys and values are subject to change without notice. Unknown keys are ignored.
+ type: dict
+ version_added: "20.5.0"
+
+
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward.
+ - REST support requires ONTAP 9.6 or later.
+ - Ansible 2.6
+ - Ansible 2.9 or later is strongly recommended as it enables the new collection delivery system.
+ - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
+ - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
+ - netapp-lib 2020.3.12 is strongly recommended as it provides better error reporting for connection issues.
+ - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
+
+notes:
+ - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
+
+'''
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py
new file mode 100644
index 00000000..73693f54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py
@@ -0,0 +1,745 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''
+netapp.py
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import ssl
+import time
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+COLLECTION_VERSION = "20.12.0"
+
+try:
+ from netapp_lib.api.zapi import zapi
+ HAS_NETAPP_LIB = True
+except ImportError:
+ HAS_NETAPP_LIB = False
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+POW2_BYTE_MAP = dict(
+ # Here, 1 kb = 1024
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+)
+
+ERROR_MSG = dict(
+ no_cserver='This module is expected to run as cluster admin'
+)
+
+try:
+ from solidfire.factory import ElementFactory
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+
+def has_netapp_lib():
+ return HAS_NETAPP_LIB
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def na_ontap_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=False, type='str', aliases=['user']),
+ password=dict(required=False, type='str', aliases=['pass'], no_log=True),
+ https=dict(required=False, type='bool', default=False),
+ validate_certs=dict(required=False, type='bool', default=True),
+ http_port=dict(required=False, type='int'),
+ ontapi=dict(required=False, type='int'),
+ use_rest=dict(required=False, type='str', default='auto'),
+ feature_flags=dict(required=False, type='dict', default=dict()),
+ cert_filepath=dict(required=False, type='str'),
+ key_filepath=dict(required=False, type='str'),
+ )
+
+
+def has_feature(module, feature_name):
+ feature = get_feature(module, feature_name)
+ if isinstance(feature, bool):
+ return feature
+ module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name)
+
+
+def get_feature(module, feature_name):
+ ''' if the user has configured the feature, use it
+ otherwise, use our default
+ '''
+ default_flags = dict(
+ check_required_params_for_none=True,
+ classic_basic_authorization=False, # use ZAPI wrapper to send Authorization header
+ deprecation_warning=True,
+ sanitize_xml=True,
+ sanitize_code_points=[8], # unicode values, 8 is backspace
+ show_modified=True
+ )
+
+ if module.params['feature_flags'] is not None and feature_name in module.params['feature_flags']:
+ return module.params['feature_flags'][feature_name]
+ if feature_name in default_flags:
+ return default_flags[feature_name]
+ module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name)
+
+
+def create_sf_connection(module, port=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_SF_SDK and hostname and username and password:
+ try:
+ return_val = ElementFactory.create(hostname, username, password, port=port)
+ return return_val
+ except Exception:
+ raise Exception("Unable to create SF connection")
+ else:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+
+def set_auth_method(module, username, password, cert_filepath, key_filepath):
+ error = None
+ if password is None and username is None:
+ if cert_filepath is None and key_filepath is not None:
+ error = 'Error: cannot have a key file without a cert file'
+ elif cert_filepath is None:
+ error = 'Error: ONTAP module requires username/password or SSL certificate file(s)'
+ elif key_filepath is None:
+ auth_method = 'single_cert'
+ else:
+ auth_method = 'cert_key'
+ elif password is not None and username is not None:
+ if cert_filepath is not None or key_filepath is not None:
+ error = 'Error: cannot have both basic authentication (username/password) ' +\
+ 'and certificate authentication (cert/key files)'
+ elif has_feature(module, 'classic_basic_authorization'):
+ auth_method = 'basic_auth'
+ else:
+ auth_method = 'speedy_basic_auth'
+ else:
+ error = 'Error: username and password have to be provided together'
+ if cert_filepath is not None or key_filepath is not None:
+ error += ' and cannot be used with cert or key files'
+ if error:
+ module.fail_json(msg=error)
+ return auth_method
+
+
+def setup_na_ontap_zapi(module, vserver=None, wrap_zapi=False):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ https = module.params['https']
+ validate_certs = module.params['validate_certs']
+ port = module.params['http_port']
+ version = module.params['ontapi']
+ cert_filepath = module.params['cert_filepath']
+ key_filepath = module.params['key_filepath']
+ auth_method = set_auth_method(module, username, password, cert_filepath, key_filepath)
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ if auth_method in ('single_cert', 'cert_key'):
+ # override NaServer in netapp-lib to enable certificate authentication
+ server = OntapZAPICx(hostname, module=module, username=username, password=password,
+ validate_certs=validate_certs, cert_filepath=cert_filepath,
+ key_filepath=key_filepath, style=zapi.NaServer.STYLE_CERTIFICATE,
+ auth_method=auth_method)
+ # SSL certificate authentication requires SSL
+ https = True
+ elif auth_method == 'speedy_basic_auth' or wrap_zapi:
+ # override NaServer in netapp-lib to add Authorization header preemptively
+ # use wrapper to handle parse error (mostly for na_ontap_command)
+ server = OntapZAPICx(hostname, module=module, username=username, password=password,
+ validate_certs=validate_certs, auth_method=auth_method)
+ else:
+ # legacy netapp-lib
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ if version:
+ minor = version
+ else:
+ minor = 110
+ server.set_api_version(major=1, minor=minor)
+ # default is HTTP
+ if https:
+ if port is None:
+ port = 443
+ transport_type = 'HTTPS'
+ # HACK to bypass certificate verification
+ if validate_certs is False:
+ if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ else:
+ if port is None:
+ port = 80
+ transport_type = 'HTTP'
+ server.set_transport_type(transport_type)
+ server.set_port(port)
+ server.set_server_type('FILER')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def is_zapi_connection_error(message):
+ ''' return True if it is a connection issue '''
+ # netapp-lib message may contain a tuple or a str!
+ if isinstance(message, tuple) and isinstance(message[0], ConnectionError):
+ return True
+ if isinstance(message, str) and message.startswith(('URLError', 'Unauthorized')):
+ return True
+ return False
+
+
+def is_zapi_write_access_error(message):
+    ''' return True if it is a write access issue '''
+ # netapp-lib message may contain a tuple or a str!
+ if isinstance(message, str) and message.startswith('Insufficient privileges:'):
+ return 'does not have write access' in message
+ return False
+
+
+def ems_log_event(source, server, name="Ansible", ident="12345", version=COLLECTION_VERSION,
+ category="Information", event="setup", autosupport="false"):
+ ems_log = zapi.NaElement('ems-autosupport-log')
+ # Host name invoking the API.
+ ems_log.add_new_child("computer-name", name)
+ # ID of event. A user defined event-id, range [0..2^32-2].
+ ems_log.add_new_child("event-id", ident)
+ # Name of the application invoking the API.
+ ems_log.add_new_child("event-source", source)
+ # Version of application invoking the API.
+ ems_log.add_new_child("app-version", version)
+ # Application defined category of the event.
+ ems_log.add_new_child("category", category)
+ # Description of event to log. An application defined message to log.
+ ems_log.add_new_child("event-description", event)
+ ems_log.add_new_child("log-level", "6")
+ ems_log.add_new_child("auto-support", autosupport)
+ try:
+ server.invoke_successfully(ems_log, True)
+ except zapi.NaApiError as exc:
+ # Do not fail if we can't connect to the server.
+ # The module will report a better error when trying to get some data from ONTAP.
+ # Do not fail if we don't have write privileges.
+ if not is_zapi_connection_error(exc.message) and not is_zapi_write_access_error(exc.message):
+ # raise on other errors, as it may be a bug in calling the ZAPI
+ raise exc
+
+
+def get_cserver_zapi(server):
+ ''' returns None if not run on the management or cluster IP '''
+ vserver_info = zapi.NaElement('vserver-get-iter')
+ query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
+ query = zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+ try:
+ result = server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+ except zapi.NaApiError as exc:
+ # Do not fail if we can't connect to the server.
+ # The module will report a better error when trying to get some data from ONTAP.
+ if is_zapi_connection_error(exc.message):
+ return None
+ # raise on other errors, as it may be a bug in calling the ZAPI
+ raise exc
+ attribute_list = result.get_child_by_name('attributes-list')
+ if attribute_list is not None:
+ vserver_list = attribute_list.get_child_by_name('vserver-info')
+ if vserver_list is not None:
+ return vserver_list.get_child_content('vserver-name')
+ return None
+
+
+def classify_zapi_exception(error):
+ ''' return type of error '''
+ try:
+ # very unlikely to fail, but don't take any chance
+ err_code = int(error.code)
+ except (AttributeError, ValueError):
+ err_code = 0
+ try:
+ # very unlikely to fail, but don't take any chance
+ err_msg = error.message
+ except AttributeError:
+ err_msg = ""
+ if err_code == 13005 and err_msg.startswith('Unable to find API:') and 'data vserver' in err_msg:
+ return 'missing_vserver_api_error', 'Most likely running a cluster level API as vserver: %s' % to_native(error)
+ if err_code == 13001 and err_msg.startswith("RPC: Couldn't make connection"):
+ return 'rpc_error', to_native(error)
+ return "other_error", to_native(error)
+
+
+def get_cserver(connection, is_rest=False):
+ if not is_rest:
+ return get_cserver_zapi(connection)
+
+ params = {'fields': 'type'}
+ api = "private/cli/vserver"
+ json, error = connection.get(api, params)
+ if json is None or error is not None:
+ # exit if there is an error or no data
+ return None
+ vservers = json.get('records')
+ if vservers is not None:
+ for vserver in vservers:
+ if vserver['type'] == 'admin': # cluster admin
+ return vserver['vserver']
+ if len(vservers) == 1: # assume vserver admin
+ return vservers[0]['vserver']
+
+ return None
+
+
if HAS_NETAPP_LIB:
    class OntapZAPICx(zapi.NaServer):
        ''' override zapi NaServer class to:
            - enable SSL certificate authentication
            - ignore invalid XML characters in ONTAP output (when using CLI module)
            - add Authorization header when using basic authentication
        '''
        def __init__(self, hostname=None, server_type=zapi.NaServer.SERVER_TYPE_FILER,
                     transport_type=zapi.NaServer.TRANSPORT_TYPE_HTTP,
                     style=zapi.NaServer.STYLE_LOGIN_PASSWORD, username=None,
                     password=None, port=None, trace=False, module=None,
                     cert_filepath=None, key_filepath=None, validate_certs=None,
                     auth_method=None):
            # python 2.x syntax, but works for python 3 as well
            super(OntapZAPICx, self).__init__(hostname, server_type=server_type,
                                              transport_type=transport_type,
                                              style=style, username=username,
                                              password=password, port=port, trace=trace)
            # extra state not known to the base NaServer class
            self.cert_filepath = cert_filepath
            self.key_filepath = key_filepath
            self.validate_certs = validate_certs
            # AnsibleModule - used for fail_json and feature flags
            self.module = module
            self.base64_creds = None
            if auth_method == 'speedy_basic_auth':
                # precompute the Authorization header so the request carries
                # credentials up front instead of waiting for a 401 challenge
                auth = '%s:%s' % (username, password)
                self.base64_creds = base64.b64encode(auth.encode()).decode()

        def _create_certificate_auth_handler(self):
            ''' build an HTTPS handler that authenticates with an SSL client
                certificate (and optional private key file) '''
            try:
                context = ssl.create_default_context()
            except AttributeError as exc:
                msg = 'SSL certificate authentication requires python 2.7 or later.'
                msg += ' More info: %s' % repr(exc)
                self.module.fail_json(msg=msg)
            if not self.validate_certs:
                # caller asked to skip server certificate verification
                context.check_hostname = False
                context.verify_mode = ssl.CERT_NONE
            try:
                context.load_cert_chain(self.cert_filepath, keyfile=self.key_filepath)
            except IOError as exc:  # python 2.7 does not have FileNotFoundError
                msg = 'Cannot load SSL certificate, check files exist.'
                msg += ' More info: %s' % repr(exc)
                self.module.fail_json(msg=msg)
            return zapi.urllib.request.HTTPSHandler(context=context)

        def _parse_response(self, response):
            ''' handling XML parsing exception
                On parse failure, and when the 'sanitize_xml' feature is on,
                strip control characters that some ONTAP releases emit in CLI
                pass-through output and retry the parse once.
            '''
            try:
                return super(OntapZAPICx, self)._parse_response(response)
            except zapi.etree.XMLSyntaxError as exc:
                if has_feature(self.module, 'sanitize_xml'):
                    # some ONTAP CLI commands return BEL on error
                    new_response = response.replace(b'\x07\n', b'')
                    # And 9.1 uses \r\n rather than \n !
                    new_response = new_response.replace(b'\x07\r\n', b'')
                    # And 9.7 may send backspaces
                    for code_point in get_feature(self.module, 'sanitize_code_points'):
                        if bytes([8]) == b'\x08':   # python 3
                            byte = bytes([code_point])
                        elif chr(8) == b'\x08':     # python 2
                            byte = chr(code_point)
                        else:                       # very unlikely, noop
                            byte = b'.'
                        new_response = new_response.replace(byte, b'.')
                    try:
                        return super(OntapZAPICx, self)._parse_response(new_response)
                    except Exception:
                        # ignore a second exception, we'll report the first one
                        pass
                try:
                    # report first exception, but include full response
                    exc.msg += ". Received: %s" % response
                except Exception:
                    # in case the response is very badly formatted, ignore it
                    pass
                raise exc

        def _create_request(self, na_element, enable_tunneling=False):
            ''' intercept newly created request to add Authorization header '''
            request, netapp_element = super(OntapZAPICx, self)._create_request(na_element, enable_tunneling=enable_tunneling)
            if self.base64_creds is not None:
                # speedy basic auth: send credentials without a 401 round-trip
                request.add_header("Authorization", "Basic %s" % self.base64_creds)
            return request, netapp_element
+
+
class OntapRestAPI(object):
    ''' wrapper to send requests to ONTAP REST APIs '''

    def __init__(self, module, timeout=60):
        '''
        :param module: AnsibleModule instance; connection options are read from module.params
        :param timeout: per-request timeout in seconds, passed to the requests library
        '''
        self.module = module
        self.username = self.module.params['username']
        self.password = self.module.params['password']
        self.hostname = self.module.params['hostname']
        self.use_rest = self.module.params['use_rest'].lower()
        self.cert_filepath = self.module.params['cert_filepath']
        self.key_filepath = self.module.params['key_filepath']
        self.verify = self.module.params['validate_certs']
        self.timeout = timeout
        port = self.module.params['http_port']
        if port is None:
            self.url = 'https://' + self.hostname + '/api/'
        else:
            self.url = 'https://%s:%d/api/' % (self.hostname, port)
        self.is_rest_error = None
        # filled in by set_version() after probing GET /api/cluster
        self.ontap_version = dict(
            full='unknown',
            generation=-1,
            major=-1,
            minor=-1,
            valid=False
        )
        self.errors = list()
        self.debug_logs = list()
        self.auth_method = set_auth_method(self.module, self.username, self.password, self.cert_filepath, self.key_filepath)
        self.check_required_library()

    def requires_ontap_9_6(self, module_name):
        ''' error message for modules that require ONTAP 9.6 (first REST-capable release) '''
        # BUGFIX: the message built by requires_ontap_version() was previously
        # discarded (no return), so callers always received None.
        return self.requires_ontap_version(module_name)

    def requires_ontap_version(self, module_name, version='9.6'):
        ''' build an error message for REST-only modules,
            appending the REST probe error when one was recorded '''
        suffix = " - %s" % self.is_rest_error if self.is_rest_error is not None else ""
        return "%s only support REST, and requires ONTAP %s or later.%s" % (module_name, version, suffix)

    def check_required_library(self):
        ''' fail the module if the requests package is not installed '''
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def send_request(self, method, api, params, json=None, accept=None,
                     vserver_name=None, vserver_uuid=None):
        ''' send http request and process response, including error conditions
        :param method: HTTP verb (GET, POST, PATCH, DELETE, OPTIONS)
        :param api: path relative to https://<hostname>/api/
        :param params: query parameters, or None
        :param json: request body, serialized as JSON
        :param accept: optional Accept header value (turns HAL linking on/off)
        :param vserver_name: optional vserver tunneling header
        :param vserver_uuid: optional vserver tunneling header
        :return: (status_code, json_dict, error_details)
        '''
        url = self.url + api
        status_code = None
        content = None
        json_dict = None
        json_error = None
        error_details = None
        headers = None
        if accept is not None or vserver_name is not None or vserver_uuid is not None:
            headers = dict()
            # accept is used to turn on/off HAL linking
            if accept is not None:
                headers['accept'] = accept
            # vserver tunneling using vserver name and/or UUID
            if vserver_name is not None:
                headers['X-Dot-SVM-Name'] = vserver_name
            if vserver_uuid is not None:
                headers['X-Dot-SVM-UUID'] = vserver_uuid

        def get_json(response):
            ''' extract json, and error message if present '''
            try:
                json = response.json()
            except ValueError:
                return None, None
            error = json.get('error')
            return json, error

        if self.auth_method == 'single_cert':
            kwargs = dict(cert=self.cert_filepath)
        elif self.auth_method == 'cert_key':
            kwargs = dict(cert=(self.cert_filepath, self.key_filepath))
        elif self.auth_method in ('basic_auth', 'speedy_basic_auth'):
            # with requests, there is no challenge, eg no 401.
            kwargs = dict(auth=(self.username, self.password))
        else:
            raise KeyError(self.auth_method)

        try:
            response = requests.request(method, url, verify=self.verify, params=params,
                                        timeout=self.timeout, json=json, headers=headers, **kwargs)
            content = response.content  # for debug purposes
            status_code = response.status_code
            # If the response was successful, no Exception will be raised
            response.raise_for_status()
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            __, json_error = get_json(response)
            if json_error is None:
                self.log_error(status_code, 'HTTP error: %s' % err)
                error_details = str(err)
            # If an error was reported in the json payload, it is handled below
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        if json_error is not None:
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        self.log_debug(status_code, content)
        if method == 'OPTIONS' and status_code is not None and not json_dict:
            # OPTIONS returns no json body; report the supported verbs from the
            # Allow header instead.
            # BUGFIX: json_dict was None here, so json_dict['Allow'] = ... raised
            # a TypeError; build a fresh dict instead.
            json_dict = dict(Allow=response.headers.get('Allow'))
        return status_code, json_dict, error_details

    def wait_on_job(self, job, timeout=600, increment=60):
        ''' poll an asynchronous REST job until it completes, fails, or times out
        :param job: job descriptor as returned by an async REST call, expected as:
            {'uuid': ..., '_links': {'self': {'href': '/api/cluster/jobs/<uuid>'}}}
        :param timeout: maximum total wait in seconds
        :param increment: seconds between polls
        :return: (message, error) - message is the job's final 'message' field;
            on job failure, returns (None, message)
        '''
        try:
            url = job['_links']['self']['href'].split('api/')[1]
        except Exception as err:
            self.log_error(0, 'URL Incorrect format: %s\n Job: %s' % (err, job))
        keep_running = True
        error = None
        message = None
        runtime = 0
        retries = 0
        max_retries = 3
        while keep_running:
            # poll the job endpoint; a job record looks like:
            # {'uuid': ..., 'description': ..., 'state': 'running|success|failure',
            #  'message': ... (optional), 'code': ..., '_links': {...}}
            job_json, job_error = self.get(url, None)
            if job_error:
                error = job_error
                retries += 1
                if retries > max_retries:
                    self.log_error(0, 'Job error: Reach max retries.')
                    break
            else:
                retries = 0
                message = job_json.get('message', '')
                if job_json['state'] == 'failure':
                    # if the job has failed, return message as error
                    return None, message
                if job_json['state'] != 'running':
                    keep_running = False
                else:
                    if runtime >= timeout:
                        keep_running = False
                        if job_json['state'] != 'success':
                            self.log_error(0, 'Timeout error: Process still running')
            # BUGFIX: the sleep was previously inside the success branch only,
            # so transient GET errors were retried in a tight loop with no delay.
            if keep_running:
                time.sleep(increment)
                runtime += increment
        return message, error

    def get(self, api, params=None):
        ''' send a GET request, return (body, error) '''
        method = 'GET'
        dummy, message, error = self.send_request(method, api, params)
        return message, error

    def post(self, api, body, params=None):
        ''' send a POST request with a JSON body, return (body, error) '''
        method = 'POST'
        dummy, message, error = self.send_request(method, api, params, json=body)
        return message, error

    def patch(self, api, body, params=None):
        ''' send a PATCH request with a JSON body, return (body, error) '''
        method = 'PATCH'
        dummy, message, error = self.send_request(method, api, params, json=body)
        return message, error

    def delete(self, api, body=None, params=None):
        ''' send a DELETE request, return (body, error) '''
        method = 'DELETE'
        dummy, message, error = self.send_request(method, api, params, json=body)
        return message, error

    def options(self, api, params=None):
        ''' send an OPTIONS request, return (body, error);
            the body reports the supported verbs under 'Allow' '''
        method = 'OPTIONS'
        dummy, message, error = self.send_request(method, api, params)
        return message, error

    def set_version(self, message):
        ''' record the ONTAP version from a GET /api/cluster response body
            self.ontap_version['valid'] is set to False when any field is missing
        '''
        try:
            version = message.get('version', 'not found')
        except AttributeError:
            self.ontap_version['full'] = 'unreadable message'
            return
        for key in self.ontap_version:
            try:
                self.ontap_version[key] = version.get(key, -1)
            except AttributeError:
                self.ontap_version[key] = 'unreadable version'
        self.ontap_version['valid'] = True
        for key in self.ontap_version:
            # BUGFIX: this used to compare the whole dict to -1
            # (self.ontap_version == -1), so 'valid' was never cleared
            if self.ontap_version[key] == -1:
                self.ontap_version['valid'] = False
                break

    def _is_rest(self, used_unsupported_rest_properties=None):
        ''' decide whether to use REST, honoring the use_rest option;
            probes GET /api/cluster when use_rest is 'auto' '''
        if self.use_rest not in ['always', 'auto', 'never']:
            error = "use_rest must be one of: never, always, auto. Got: '%s'" % self.use_rest
            return False, error
        if self.use_rest == "always":
            if used_unsupported_rest_properties:
                error = "REST API currently does not support '%s'" % \
                        ', '.join(used_unsupported_rest_properties)
                return True, error
            else:
                return True, None
        if self.use_rest == 'never' or used_unsupported_rest_properties:
            # force ZAPI if requested or if some parameter requires it
            return False, None
        # using GET rather than HEAD because the error messages are different
        method = 'GET'
        api = 'cluster'
        params = {'fields': ['version']}
        status_code, message, error = self.send_request(method, api, params=params)
        self.set_version(message)
        self.is_rest_error = str(error) if error else None
        if status_code == 200:
            return True, None
        self.log_error(status_code, str(error))
        return False, None

    def is_rest(self, used_unsupported_rest_properties=None):
        ''' only return error if there is a reason to '''
        use_rest, error = self._is_rest(used_unsupported_rest_properties)
        if used_unsupported_rest_properties is None:
            return use_rest
        return use_rest, error

    def log_error(self, status_code, message):
        ''' record an error in both the error list and the debug log '''
        self.errors.append(message)
        self.debug_logs.append((status_code, message))

    def log_debug(self, status_code, content):
        ''' record a (status_code, content) pair in the debug log '''
        self.debug_logs.append((status_code, content))

    def write_to_file(self, tag, data=None, filepath=None, append=True):
        '''
        This function is only for debug purposes, all calls to write_to_file should be removed
        before submitting.
        If data is None, tag is considered as data
        else tag is a label, and data is data.
        '''
        if filepath is None:
            filepath = '/tmp/ontap_log'
        if append:
            mode = 'a'
        else:
            mode = 'w'
        with open(filepath, mode) as afile:
            if data is not None:
                afile.write("%s: %s\n" % (str(tag), str(data)))
            else:
                afile.write(str(tag))
                afile.write('\n')

    def write_errors_to_file(self, tag=None, filepath=None, append=True):
        ''' debug helper: dump recorded errors to a file
            when append is False, only the first write truncates the file '''
        if tag is None:
            tag = 'Error'
        for error in self.errors:
            self.write_to_file(tag, error, filepath, append)
            if not append:
                append = True

    def write_debug_log_to_file(self, tag=None, filepath=None, append=True):
        ''' debug helper: dump the (status_code, content) debug log to a file
            when append is False, only the first write truncates the file '''
        if tag is None:
            tag = 'Debug'
        for status_code, message in self.debug_logs:
            self.write_to_file(tag, status_code, filepath, append)
            if not append:
                append = True
            self.write_to_file(tag, message, filepath, append)
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py
new file mode 100644
index 00000000..b7331d87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py
@@ -0,0 +1,159 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_SF_SDK = False
+try:
+ import solidfire.common
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
class NaElementSWModule(object):
    ''' Helper methods shared by ElementSW (SolidFire) modules,
        wrapping an Element connection object. '''

    def __init__(self, elem):
        # live connection to the Element cluster
        self.elem_connect = elem
        self.parameters = dict()

    def get_volume(self, volume_id):
        """
        Return volume details if volume exists for given volume_id

        :param volume_id: volume ID
        :type volume_id: int
        :return: Volume dict if found, None if not found
        :rtype: dict
        """
        listed = self.elem_connect.list_volumes(volume_ids=[volume_id])
        # a volume with a non-empty delete_time is pending deletion: skip it
        return next((vol for vol in listed.volumes
                     if vol.volume_id == volume_id and str(vol.delete_time) == ""),
                    None)

    def get_volume_id(self, vol_name, account_id):
        """
        Return volume id from the given (valid) account_id if found
        Return None if not found

        :param vol_name: Name of the volume
        :type vol_name: str
        :param account_id: Account ID
        :type account_id: int

        :return: Volume ID of the first matching volume if found. None if not found.
        :rtype: int
        """
        listed = self.elem_connect.list_volumes_for_account(account_id=account_id)
        return next((vol.volume_id for vol in listed.volumes
                     if vol.name == vol_name and str(vol.delete_time) == ""),
                    None)

    def volume_id_exists(self, volume_id):
        """
        Return volume_id if volume exists for given volume_id

        :param volume_id: volume ID
        :type volume_id: int
        :return: Volume ID if found, None if not found
        :rtype: int
        """
        listed = self.elem_connect.list_volumes(volume_ids=[volume_id])
        return next((vol.volume_id for vol in listed.volumes
                     if vol.volume_id == volume_id and str(vol.delete_time) == ""),
                    None)

    def volume_exists(self, volume, account_id):
        """
        Return volume_id if exists, None if not found

        :param volume: Volume ID or Name
        :type volume: str
        :param account_id: Account ID (valid)
        :type account_id: int
        :return: Volume ID if found, None if not found
        """
        if str(volume).isdigit():
            # looks like a numeric ID: try a lookup by ID first
            numeric_id = int(volume)
            try:
                if self.volume_id_exists(numeric_id):
                    return numeric_id
            except solidfire.common.ApiServerError:
                # don't fail, fall back to a lookup by name
                pass
        return self.get_volume_id(volume, account_id)

    def get_snapshot(self, snapshot_id, volume_id):
        """
        Return snapshot details if found

        :param snapshot_id: Snapshot ID or Name
        :type snapshot_id: str
        :param volume_id: Account ID (valid)
        :type volume_id: int
        :return: Snapshot dict if found, None if not found
        :rtype: dict
        """
        # mandate src_volume_id although not needed by sdk
        listed = self.elem_connect.list_snapshots(volume_id=volume_id)
        numeric_id = int(snapshot_id) if str(snapshot_id).isdigit() else None
        for snap in listed.snapshots:
            # match on the numeric id when one was supplied, else on the name
            if numeric_id is not None and snap.snapshot_id == numeric_id:
                return snap
            if snap.name == snapshot_id:
                return snap
        return None

    def account_exists(self, account):
        """
        Return account_id if account exists for given account id or name
        Raises an exception if account does not exist

        :param account: Account ID or Name
        :type account: str
        :return: Account ID if found, None if not found
        """
        if account.isdigit():
            # looks like a numeric ID: try a lookup by ID first
            numeric_id = int(account)
            try:
                by_id = self.elem_connect.get_account_by_id(account_id=numeric_id)
                if by_id.account.account_id == numeric_id:
                    return numeric_id
            except solidfire.common.ApiServerError:
                # don't fail, fall back to a lookup by name
                pass
        # the SDK raises an exception if the account doesn't exist
        by_name = self.elem_connect.get_account_by_name(username=account)
        return by_name.account.account_id

    def set_element_attributes(self, source):
        """
        Return telemetry attributes for the current execution

        :param source: name of the module
        :type source: str
        :return: a dict containing telemetry attributes
        """
        return {'config-mgmt': 'ansible', 'event-source': source}
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py
new file mode 100644
index 00000000..056e85e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py
@@ -0,0 +1,392 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+import re
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
def cmp(obj1, obj2):
    """
    Replacement for python 2's cmp(), which python 3 removed.
    Strings are compared case-insensitively; lists are compared as
    sorted collections (case-insensitive for string elements).
    :param obj1: first object to check
    :param obj2: second object to check
    :return: -1, 0 or 1
    """
    if obj1 is None:
        # None always sorts first
        return -1
    if isinstance(obj1, str) and isinstance(obj2, str):
        # case-insensitive string comparison
        obj1, obj2 = obj1.lower(), obj2.lower()
    if isinstance(obj1, list) and isinstance(obj2, list):
        # order-insensitive list comparison, lower-casing string elements
        obj1 = sorted(item.lower() if isinstance(item, str) else item for item in obj1)
        obj2 = sorted(item.lower() if isinstance(item, str) else item for item in obj2)
    return (obj1 > obj2) - (obj1 < obj2)
+
+
class NetAppModule(object):
    '''
    Common class for NetApp modules
    set of support functions to derive actions based
    on the current state of the system, and a desired state
    '''

    def __init__(self):
        # free-form audit trail; not consumed by the helpers below
        self.log = list()
        # set to True by the helpers as soon as a change is detected
        self.changed = False
        self.parameters = {'name': 'not initialized'}
        # per-module maps of ansible option names to ZAPI element names,
        # populated by each module as needed
        self.zapi_string_keys = dict()
        self.zapi_bool_keys = dict()
        self.zapi_list_keys = dict()
        self.zapi_int_keys = dict()
        self.zapi_required = dict()

    def set_parameters(self, ansible_params):
        ''' copy all non-None ansible parameters into self.parameters and return it '''
        self.parameters = dict()
        for param in ansible_params:
            if ansible_params[param] is not None:
                self.parameters[param] = ansible_params[param]
        return self.parameters

    def check_and_set_parameters(self, module):
        ''' copy all non-None module parameters into self.parameters and return it
            When the 'check_required_params_for_none' feature is enabled, fail
            the module if a required parameter is explicitly set to None.
        '''
        self.parameters = dict()
        check_for_none = netapp_utils.has_feature(module, 'check_required_params_for_none')
        if check_for_none:
            required_keys = [key for key, value in module.argument_spec.items() if value.get('required')]
        for param in module.params:
            if module.params[param] is not None:
                self.parameters[param] = module.params[param]
            elif check_for_none and param in required_keys:
                module.fail_json(msg="%s requires a value, got: None" % param)
        return self.parameters

    @staticmethod
    def type_error_message(type_str, key, value):
        ''' build a consistent error message for type validation failures '''
        return "expecting '%s' type for %s: %s, got: %s" % (type_str, repr(key), repr(value), type(value))

    def get_value_for_bool(self, from_zapi, value, key=None):
        """
        Convert boolean values to string or vice-versa
        If from_zapi = True, value is converted from string (as it appears in ZAPI) to boolean
        If from_zapi = False, value is converted from boolean to string
        For get() method, from_zapi = True
        For modify(), create(), from_zapi = False
        :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
        :param value: value of the boolean attribute
        :param key: if present, force error checking to validate type, and accepted values
        :return: string or boolean
        """
        if value is None:
            return None
        if from_zapi:
            if key is not None and not isinstance(value, str):
                raise TypeError(self.type_error_message('str', key, value))
            if key is not None and value not in ('true', 'false'):
                raise ValueError('Unexpected value: %s received from ZAPI for boolean attribute: %s' % (repr(value), repr(key)))
            return value == 'true'
        if key is not None and not isinstance(value, bool):
            raise TypeError(self.type_error_message('bool', key, value))
        return 'true' if value else 'false'

    def get_value_for_int(self, from_zapi, value, key=None):
        """
        Convert integer values to string or vice-versa
        If from_zapi = True, value is converted from string (as it appears in ZAPI) to integer
        If from_zapi = False, value is converted from integer to string
        For get() method, from_zapi = True
        For modify(), create(), from_zapi = False
        :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
        :param value: value of the integer attribute
        :param key: if present, force error checking to validate type
        :return: string or integer
        """
        if value is None:
            return None
        if from_zapi:
            if key is not None and not isinstance(value, str):
                raise TypeError(self.type_error_message('str', key, value))
            return int(value)
        if key is not None and not isinstance(value, int):
            raise TypeError(self.type_error_message('int', key, value))
        return str(value)

    def get_value_for_list(self, from_zapi, zapi_parent, zapi_child=None, data=None):
        """
        Convert a python list() to NaElement or vice-versa
        If from_zapi = True, value is converted from NaElement (parent-children structure) to list()
        If from_zapi = False, value is converted from list() to NaElement
        :param zapi_parent: ZAPI parent key or the ZAPI parent NaElement
        :param zapi_child: ZAPI child key
        :param data: list() to be converted to NaElement parent-children object
        :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
        :return: list() or NaElement
        """
        if from_zapi:
            if zapi_parent is None:
                return []
            return [zapi_child.get_content() for zapi_child in zapi_parent.get_children()]

        zapi_parent = netapp_utils.zapi.NaElement(zapi_parent)
        for item in data:
            zapi_parent.add_new_child(zapi_child, item)
        return zapi_parent

    def get_cd_action(self, current, desired):
        ''' takes a desired state and a current state, and return an action:
            create, delete, None
            eg:
            is_present = 'absent'
            some_object = self.get_object(source)
            if some_object is not None:
                is_present = 'present'
            action = cd_action(current=is_present, desired = self.desired.state())
        '''
        # desired state defaults to 'present' when not specified
        if 'state' in desired:
            desired_state = desired['state']
        else:
            desired_state = 'present'

        if current is None and desired_state == 'absent':
            return None
        if current is not None and desired_state == 'present':
            return None
        # change in state
        self.changed = True
        if current is not None:
            return 'delete'
        return 'create'

    def compare_and_update_values(self, current, desired, keys_to_compare):
        ''' merge desired values into current for the listed keys
            :return: (updated_values, is_changed) - updated_values carries the
            desired value when it differs from current, else the current value
        '''
        updated_values = dict()
        is_changed = False
        for key in keys_to_compare:
            if key in current:
                if key in desired and desired[key] is not None:
                    if current[key] != desired[key]:
                        updated_values[key] = desired[key]
                        is_changed = True
                    else:
                        updated_values[key] = current[key]
                else:
                    updated_values[key] = current[key]

        return updated_values, is_changed

    @staticmethod
    def check_keys(current, desired):
        ''' TODO: raise an error if keys do not match
            with the exception of:
            new_name, state in desired
        '''

    @staticmethod
    def compare_lists(current, desired, get_list_diff):
        ''' compares two lists and return a list of elements that are either the desired elements or elements that are
            modified from the current state depending on the get_list_diff flag
            :param: current: current item attribute in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: list of attributes to be modified
            :rtype: list
        '''
        # work on copies so the caller's lists are not modified
        current_copy = deepcopy(current)
        desired_copy = deepcopy(desired)

        # get what in desired and not in current
        desired_diff_list = list()
        for item in desired:
            if item in current_copy:
                # remove from the copy to correctly handle duplicates
                current_copy.remove(item)
            else:
                desired_diff_list.append(item)

        # get what in current but not in desired
        current_diff_list = list()
        for item in current:
            if item in desired_copy:
                desired_copy.remove(item)
            else:
                current_diff_list.append(item)

        if desired_diff_list or current_diff_list:
            # there are changes
            if get_list_diff:
                return desired_diff_list
            else:
                return desired
        else:
            return None

    def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and return a dict of attributes that are
            not in the current state
            It is expected that all attributes of interest are listed in current and
            desired.
            :param: current: current attributes in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: dict of attributes to be modified
            :rtype: dict

            NOTE: depending on the attribute, the caller may need to do a modify or a
            different operation (eg move volume if the modified attribute is an
            aggregate name)
        '''
        # if the object does not exist, we can't modify it
        modified = dict()
        if current is None:
            return modified

        # error out if keys do not match
        self.check_keys(current, desired)

        # collect changed attributes
        for key, value in current.items():
            if key in desired and desired[key] is not None:
                if isinstance(value, list):
                    modified_list = self.compare_lists(value, desired[key], get_list_diff)  # get modified list from current and desired
                    if modified_list is not None:
                        modified[key] = modified_list
                else:
                    try:
                        # cmp is the module-level helper defined above (case-insensitive)
                        result = cmp(value, desired[key])
                    except TypeError as exc:
                        raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
                    else:
                        if result != 0:
                            modified[key] = desired[key]
        if modified:
            self.changed = True
        return modified

    def is_rename_action(self, source, target):
        ''' takes a source and target object, and returns True
            if a rename is required
            eg:
            source = self.get_object(source_name)
            target = self.get_object(target_name)
            action = is_rename_action(source, target)
            :return: None for error, True for rename action, False otherwise
        '''
        if source is None and target is None:
            # error, do nothing
            # cannot rename an non existent resource
            # alternatively we could create B
            return None
        if source is not None and target is not None:
            # error, do nothing
            # idempotency (or) new_name_is_already_in_use
            # alternatively we could delete B and rename A to B
            return False
        if source is None and target is not None:
            # do nothing, maybe the rename was already done
            return False
        # source is not None and target is None:
        # rename is in order
        self.changed = True
        return True

    @staticmethod
    def sanitize_wwn(initiator):
        ''' igroup initiator may or may not be using WWN format: eg 20:00:00:25:B5:00:20:01
            if format is matched, convert initiator to lowercase, as this is what ONTAP is using '''
        wwn_format = r'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}'
        initiator = initiator.strip()
        if re.match(wwn_format, initiator):
            initiator = initiator.lower()
        return initiator

    def safe_get(self, an_object, key_list, allow_sparse_dict=True):
        ''' recursively traverse a dictionary or any object supporting get_item
            (in our case, python dicts and NAElement responses)
            It is expected that some keys can be missing, this is controlled with allow_sparse_dict

            return value if the key chain is exhausted
            return None if a key is not found and allow_sparse_dict is True
            raise KeyError if a key is not found and allow_sparse_dict is False (looking for exact match)
            raise TypeError if an intermediate element cannot be indexed,
            unless the element is None and allow_sparse_dict is True

            NOTE: key_list is consumed (pop from the front) as the recursion proceeds,
            so callers should not reuse the list they pass in.
        '''
        if not key_list:
            # we've exhausted the keys, good!
            return an_object
        key = key_list.pop(0)
        try:
            return self.safe_get(an_object[key], key_list, allow_sparse_dict=allow_sparse_dict)
        except KeyError as exc:
            # error, key not found
            if allow_sparse_dict:
                return None
            raise exc
        except TypeError as exc:
            # error, we were expecting a dict or NAElement
            if allow_sparse_dict and an_object is None:
                return None
            raise exc

    def filter_out_none_entries(self, list_or_dict):
        """take a dict or list as input and return a dict/list without keys/elements whose values are None
           skip empty dicts or lists.
        """

        if isinstance(list_or_dict, dict):
            result = dict()
            for key, value in list_or_dict.items():
                if isinstance(value, (list, dict)):
                    # recurse into nested containers
                    sub = self.filter_out_none_entries(value)
                    if sub:
                        # skip empty dict or list
                        result[key] = sub
                elif value is not None:
                    # skip None value
                    result[key] = value
            return result

        if isinstance(list_or_dict, list):
            alist = list()
            for item in list_or_dict:
                if isinstance(item, (list, dict)):
                    # recurse into nested containers
                    sub = self.filter_out_none_entries(item)
                    if sub:
                        # skip empty dict or list
                        alist.append(sub)
                elif item is not None:
                    # skip None value
                    alist.append(item)
            return alist

        raise TypeError('unexpected type %s' % type(list_or_dict))
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
new file mode 100644
index 00000000..b0b3f774
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
@@ -0,0 +1,160 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support class for NetApp ansible modules
+
+ Provides access to application resources using REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
class RestApplication():
    """Helper methods to manage applications and application components over REST.

    All methods return a (result, error) tuple; error is None on success.
    """
    def __init__(self, rest_api, svm_name, app_name):
        self.svm_name = svm_name
        self.app_name = app_name
        self.app_uuid = None        # cached once discovered or created
        self.rest_api = rest_api

    def _set_application_uuid(self):
        """Use REST application/applications to get application uuid"""
        api = '/application/applications'
        query = {'return_timeout': 30, 'return_records': 'true', 'svm.name': self.svm_name, 'name': self.app_name}
        response, error = self.rest_api.get(api, query)
        record, error = rrh.check_for_0_or_1_records(api, response, error, query)
        if error is None and record is not None:
            self.app_uuid = record['uuid']
        return None, error

    def get_application_uuid(self):
        """Use REST application/applications to get application uuid (cached after first lookup)"""
        error = None
        if self.app_uuid is None:
            dummy, error = self._set_application_uuid()
        return self.app_uuid, error

    def get_application_details(self):
        """Use REST application/applications to get application details"""
        uuid, error = self.get_application_uuid()
        if error:
            return uuid, error
        if uuid is None:    # not found
            return None, None
        api = '/application/applications/%s' % uuid
        response, error = self.rest_api.get(api)
        return response, rrh.api_error(api, error)

    def create_application(self, body):
        """Use REST application/applications san template to create one or more LUNs"""
        # guard: creating an application that is already known is a logic error
        # (the original discarded this check's result, making the guard a no-op)
        dummy, error = self.fail_if_uuid()
        if error:
            return dummy, error
        api = '/application/applications'
        query = {'return_timeout': 30, 'return_records': 'true'}
        response, error = self.rest_api.post(api, body, params=query)
        return rrh.check_for_error_and_job_results(api, response, error, self.rest_api)

    def create_application_body(self, template_name, template_body, smart_container=True):
        """Build the POST body for an application create; smart_container must be a bool."""
        if not isinstance(smart_container, bool):
            error = "expecting bool value for smart_container, got: %s" % smart_container
            return None, error
        body = {
            'name': self.app_name,
            'svm': {'name': self.svm_name},
            'smart_container': smart_container,
            template_name: template_body
        }
        return body, None

    def delete_application(self):
        """Use REST application/applications to delete app"""
        dummy, error = self.fail_if_no_uuid()
        if error:
            return dummy, error
        api = '/application/applications/%s' % self.app_uuid
        query = {'return_timeout': 30}
        response, error = self.rest_api.delete(api, params=query)
        response, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
        self.app_uuid = None        # the application is gone, invalidate the cached uuid
        return response, error

    def get_application_components(self):
        """Use REST application/applications to get application components"""
        dummy, error = self.fail_if_no_uuid()
        if error:
            return dummy, error
        api = '/application/applications/%s/components' % self.app_uuid
        response, error = self.rest_api.get(api)
        return response, rrh.api_error(api, error)

    def get_application_component_uuid(self):
        """Use REST application/applications to get component uuid.

        Assume a single component per application.
        """
        dummy, error = self.fail_if_no_uuid()
        if error:
            return dummy, error
        response, error = self.get_application_components()
        record, error = rrh.check_for_0_or_1_records(None, response, error, None)
        if error is None and record is not None:
            return record['uuid'], None
        return None, error

    def get_application_component_details(self, comp_uuid=None):
        """Use REST application/applications to get details for one component.

        When comp_uuid is None, assume a single component and look it up.
        """
        dummy, error = self.fail_if_no_uuid()
        if error:
            return dummy, error
        if comp_uuid is None:
            # assume a single component
            comp_uuid, error = self.get_application_component_uuid()
            if error:
                return comp_uuid, error
            if comp_uuid is None:
                error = 'no component for application %s' % self.app_name
                return None, error
        api = '/application/applications/%s/components/%s' % (self.app_uuid, comp_uuid)
        response, error = self.rest_api.get(api)
        return response, rrh.api_error(api, error)

    def get_application_component_backing_storage(self):
        """Use REST application/applications to get the backing storage of the single component."""
        dummy, error = self.fail_if_no_uuid()
        if error:
            return dummy, error
        response, error = self.get_application_component_details()
        if error or response is None:
            return response, error
        return response['backing_storage'], None

    def fail_if_no_uuid(self):
        """Prevent a logic error: report an error when the application uuid is not set yet."""
        if self.app_uuid is None:
            return None, 'function should not be called before application uuid is set.'
        return None, None

    def fail_if_uuid(self):
        """Prevent a logic error: report an error when the application uuid is already set."""
        if self.app_uuid is not None:
            return None, 'function should not be called when application uuid is set.'
        return None, None
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py
new file mode 100644
index 00000000..edebcf60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py
@@ -0,0 +1,93 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
def api_error(api, error):
    """Return a formatted error message for an API call, or None when no error occurred."""
    if error is None:
        return None
    return "calling: {0}: got {1}".format(api, error)
+
+
def no_response_error(api, response):
    """Return an error message for a missing or empty API response."""
    return "calling: {0}: no response {1}".format(api, repr(response))
+
+
def job_error(response, error):
    """Return an error message for a failed asynchronous job."""
    return "job reported error: {0}, received {1}".format(error, repr(response))
+
+
def unexpected_response_error(api, response, query=None):
    """Return (response, message) for a response that does not match expectations."""
    parts = ["calling: %s: unexpected response %s" % (api, repr(response))]
    if query:
        parts.append(" for query: %s" % repr(query))
    return response, "".join(parts)
+
+
def check_for_0_or_1_records(api, response, error, query=None):
    """Return (record, error) for APIs expected to match at most one record.

    Returns (None, None) when no record matched, (record, None) when exactly
    one matched, and an error tuple on API error, empty response, or an
    unexpected number of records.
    """
    if error:
        # api may be None when the caller has already formatted the error
        return (None, api_error(api, error)) if api else (None, error)
    if not response:
        return None, no_response_error(api, response)
    # use .get so a malformed response reports an error instead of raising KeyError
    num_records = response.get('num_records')
    if num_records == 0:
        return None, None           # not found
    if num_records != 1 or not response.get('records'):
        # missing num_records/records, or more than one match
        return unexpected_response_error(api, response, query)
    return response['records'][0], None
+
+
def check_for_error_and_job_results(api, response, error, rest_api):
    """Report the first error if present; otherwise wait on any job embedded
    in the response and attach the job result as response['job_response'].

    Returns a (response, error) tuple.
    """
    if error:
        error = api_error(api, error)
    # guard response: 'job' in None raised TypeError in the original
    elif response and 'job' in response:
        # async operations return a job object that must be polled to completion
        job_response, error = rest_api.wait_on_job(response['job'])
        if error:
            error = job_error(response, error)
        else:
            response['job_response'] = job_response
    return response, error
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py
new file mode 100644
index 00000000..a1a0c50c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py
@@ -0,0 +1,133 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules
+
+ Provides access to SVM (vserver) resources using ZAPI calls
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
def get_vserver(svm_cx, vserver_name):
    """
    Return vserver information.

    :return:
        vserver details dict if vserver found
        None if vserver is not found
    :rtype: dict/None
    """
    request = netapp_utils.zapi.NaElement('vserver-get-iter')
    query_details = netapp_utils.zapi.NaElement.create_node_with_children(
        'vserver-info', **{'vserver-name': vserver_name})

    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(query_details)
    request.add_child_elem(query)

    result = svm_cx.invoke_successfully(request, enable_tunneling=False)
    # guard clause: bail out early when the iterator matched nothing
    if not result.get_child_by_name('num-records') or int(result.get_child_content('num-records')) < 1:
        return None

    info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-info')

    # aggr-list may be absent: a vserver has no aggregates by default
    aggr_node = info.get_child_by_name('aggr-list')
    aggr_list = [aggr.get_content() for aggr in aggr_node.get_children()] if aggr_node is not None else []

    # allowed-protocols is set for data SVMs but absent for node SVMs
    protocols_node = info.get_child_by_name('allowed-protocols')
    protocols = [proto.get_content() for proto in protocols_node.get_children()] if protocols_node is not None else []

    return {'name': info.get_child_content('vserver-name'),
            'root_volume': info.get_child_content('root-volume'),
            'root_volume_aggregate': info.get_child_content('root-volume-aggregate'),
            'root_volume_security_style': info.get_child_content('root-volume-security-style'),
            'subtype': info.get_child_content('vserver-subtype'),
            'aggr_list': aggr_list,
            'language': info.get_child_content('language'),
            'quota_policy': info.get_child_content('quota-policy'),
            'snapshot_policy': info.get_child_content('snapshot-policy'),
            'allowed_protocols': protocols,
            'ipspace': info.get_child_content('ipspace'),
            'comment': info.get_child_content('comment')}
+
+
def modify_vserver(svm_cx, module, name, modify, parameters=None):
    '''
    Modify vserver.
    :param name: vserver name
    :param modify: list of attributes to change
    :param parameters: customer original inputs
    modify only contains the difference between the customer inputs and current
    for some attributes, it may be safer to apply the original inputs
    '''
    if parameters is None:
        parameters = modify

    # attributes that map to a single ZAPI child element
    scalar_fields = {'comment': 'comment',
                     'language': 'language',
                     'quota_policy': 'quota-policy',
                     'snapshot_policy': 'snapshot-policy'}
    # attributes that map to a list wrapper: (wrapper element, item element)
    list_fields = {'allowed_protocols': ('allowed-protocols', 'protocol'),
                   'aggr_list': ('aggr-list', 'aggr-name')}

    vserver_modify = netapp_utils.zapi.NaElement('vserver-modify')
    vserver_modify.add_new_child('vserver-name', name)
    for attribute in modify:
        if attribute in scalar_fields:
            vserver_modify.add_new_child(scalar_fields[attribute], parameters[attribute])
        elif attribute in list_fields:
            wrapper_name, item_name = list_fields[attribute]
            wrapper = netapp_utils.zapi.NaElement(wrapper_name)
            for item in parameters[attribute]:
                wrapper.add_new_child(item_name, item)
            vserver_modify.add_child_elem(wrapper)
    try:
        svm_cx.invoke_successfully(vserver_modify, enable_tunneling=False)
    except netapp_utils.zapi.NaApiError as exc:
        module.fail_json(msg='Error modifying SVM %s: %s' % (name, to_native(exc)),
                         exception=traceback.format_exc())
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py
new file mode 100644
index 00000000..f4aeb945
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_active_directory
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP configure active directory
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.9.0
+description:
+ - Configure Active Directory
+
+options:
+ state:
+ description:
+ - Whether the Active Directory should exist or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ account_name:
+ description:
+ - Active Directory account NetBIOS name.
+ required: true
+ type: str
+
+ admin_password:
+ description:
+ - Administrator password required for Active Directory account creation.
+ required: true
+ type: str
+
+ admin_username:
+ description:
+ - Administrator username required for Active Directory account creation.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Fully qualified domain name.
+ type: str
+
+ force_account_overwrite:
+ description:
+ - If true and a machine account with the same name as specified in 'account-name' exists in Active Directory, it will be overwritten and reused.
+ type: bool
+
+ organizational_unit:
+ description:
+ - Organizational unit under which the Active Directory account will be created.
+ type: str
+'''
+EXAMPLES = """
+-
+ name: Ontap test
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ tasks:
+ - name: run ontap active directory
+ na_ontap_active_directory:
+ hostname: 10.193.78.219
+ username: admin
+ password: netapp1!
+ https: True
+ validate_certs: False
+ vserver: laurentncluster-1
+ state: present
+ account_name: carchi
+ admin_password: password
+ admin_username: carchi
+ domain: 12
+"""
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapActiveDirectory(object):
    """Create, delete or modify an Active Directory account for a vserver using ZAPI."""
    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str'),
            state=dict(choices=['present', 'absent'], default='present'),
            account_name=dict(required=True, type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            admin_username=dict(required=True, type='str'),
            domain=dict(type="str", default=None),
            force_account_overwrite=dict(type="bool", default=None),
            organizational_unit=dict(type="str", default=None)
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_active_directory(self):
        """Return the active-directory-account-config matching account_name/vserver, or None."""
        active_directory_iter = netapp_utils.zapi.NaElement('active-directory-account-get-iter')
        active_directory_info = netapp_utils.zapi.NaElement('active-directory-account-config')
        active_directory_info.add_new_child('account-name', self.parameters['account_name'])
        active_directory_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(active_directory_info)
        active_directory_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(active_directory_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            # was self.parameters['account-name'], which raised KeyError while reporting the error
            self.module.fail_json(msg='Error searching for Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            return result.get_child_by_name('attributes-list').get_child_by_name('active-directory-account-config')
        return None

    def create_active_directory(self):
        """Create the Active Directory account on the vserver."""
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-create')
        active_directory_obj.add_new_child('account-name', self.parameters['account_name'])
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        if self.parameters.get('domain'):
            active_directory_obj.add_new_child('domain', self.parameters['domain'])
        if self.parameters.get('force_account_overwrite'):
            active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
        if self.parameters.get('organizational_unit'):
            active_directory_obj.add_new_child('organizational-unit', self.parameters['organizational_unit'])
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating on Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_active_directory(self):
        """Delete the Active Directory account from the vserver."""
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-delete')
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting on Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_active_directory(self):
        """Modify the Active Directory account (domain / force_account_overwrite)."""
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-modify')
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        if self.parameters.get('domain'):
            active_directory_obj.add_new_child('domain', self.parameters['domain'])
        if self.parameters.get('force_account_overwrite'):
            active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # message fixed: this is the modify path, not delete
            self.module.fail_json(msg='Error modifying on Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)

    def apply(self):
        """Apply the requested state, honoring check_mode."""
        self.asup_log_for_cserver("na_ontap_active_directory")
        current = self.get_active_directory()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_active_directory()
            elif cd_action == 'delete':
                self.delete_active_directory()
            elif modify:
                self.modify_active_directory()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Module entry point: run the requested Active Directory action."""
    NetAppOntapActiveDirectory().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py
new file mode 100644
index 00000000..3161ee1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py
@@ -0,0 +1,824 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_aggregate
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_aggregate
+short_description: NetApp ONTAP manage aggregates
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or manage aggregates on ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ service_state:
+ description:
+      - Whether the specified aggregate should be enabled or disabled. Creates the aggregate if it does not exist.
+ choices: ['online', 'offline']
+ type: str
+
+ name:
+ description:
+ - The name of the aggregate to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the aggregate to be renamed.
+ type: str
+ version_added: 2.7.0
+
+ nodes:
+ description:
+ - Node(s) for the aggregate to be created on. If no node specified, mgmt lif home will be used.
+ - If multiple nodes specified an aggr stripe will be made.
+ type: list
+ elements: str
+
+ disk_type:
+ description:
+ - Type of disk to use to build aggregate
+ choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']
+ type: str
+ version_added: 2.7.0
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+ - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
+ - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
+ - Required when C(state=present).
+ - Modifiable only if specified disk_count is larger than current disk_count.
+ - Cannot create raidgroup with 1 disk when using raid type raid4.
+ - If the disk_count % raid_size == 1, only disk_count/raid_size * raid_size will be added.
+ - If disk_count is 6, raid_type is raid4, raid_size 4, all 6 disks will be added.
+ - If disk_count is 5, raid_type is raid4, raid_size 4, 5/4 * 4 = 4 will be added. 1 will not be added.
+ type: int
+
+ disk_size:
+ description:
+ - Disk size to use in 4K block size. Disks within 10% of specified size will be used.
+ type: int
+ version_added: 2.7.0
+
+ disk_size_with_unit:
+ description:
+ - Disk size to use in the specified unit.
+ - It is a positive integer number followed by unit of T/G/M/K. For example, 72G, 1T and 32M.
+ - This option is ignored if a specific list of disks is specified through the "disks" parameter.
+ - You must only use one of either "disk-size" or "disk-size-with-unit" parameters.
+ type: str
+
+ raid_size:
+ description:
+ - Sets the maximum number of drives per raid group.
+ type: int
+ version_added: 2.7.0
+
+ raid_type:
+ description:
+ - Specifies the type of RAID groups to use in the new aggregate.
+ - raid_0 is only available on ONTAP Select.
+ choices: ['raid4', 'raid_dp', 'raid_tec', 'raid_0']
+ type: str
+ version_added: 2.7.0
+
+ unmount_volumes:
+ description:
+ - If set to "TRUE", this option specifies that all of the volumes hosted by the given aggregate are to be unmounted
+ - before the offline operation is executed.
+ - By default, the system will reject any attempt to offline an aggregate that hosts one or more online volumes.
+ type: bool
+
+ disks:
+ description:
+ - Specific list of disks to use for the new aggregate.
+ - To create a "mirrored" aggregate with a specific list of disks, both 'disks' and 'mirror_disks' options must be supplied.
+ Additionally, the same number of disks must be supplied in both lists.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ is_mirrored:
+ description:
+ - Specifies that the new aggregate be mirrored (have two plexes).
+ - If set to true, then the indicated disks will be split across the two plexes. By default, the new aggregate will not be mirrored.
+ - This option cannot be used when a specific list of disks is supplied with either the 'disks' or 'mirror_disks' options.
+ type: bool
+ version_added: 2.8.0
+
+ mirror_disks:
+ description:
+ - List of mirror disks to use. It must contain the same number of disks specified in 'disks'.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ spare_pool:
+ description:
+ - Specifies the spare pool from which to select spare disks to use in creation of a new aggregate.
+ choices: ['Pool0', 'Pool1']
+ type: str
+ version_added: 2.8.0
+
+ wait_for_online:
+ description:
+ - Set this parameter to 'true' for synchronous execution during create (wait until aggregate status is online)
+ - Set this parameter to 'false' for asynchronous execution
+ - For asynchronous, execution exits as soon as the request is sent, without checking aggregate status
+ type: bool
+ default: false
+ version_added: 2.8.0
+
+ time_out:
+ description:
+ - time to wait for aggregate creation in seconds
+ - default is set to 100 seconds
+ type: int
+ default: 100
+ version_added: 2.8.0
+
+ object_store_name:
+ description:
+ - Name of the object store configuration attached to the aggregate
+ type: str
+ version_added: 2.9.0
+
+ snaplock_type:
+ description:
+ - Type of snaplock for the aggregate being created.
+ choices: ['compliance', 'enterprise', 'non_snaplock']
+ type: str
+ version_added: 20.1.0
+
+ ignore_pool_checks:
+ description:
+ - only valid when I(disks) option is used.
+ - disks in a plex should belong to the same spare pool, and mirror disks to another spare pool.
+ - when set to true, these checks are ignored.
+ type: bool
+ version_added: 20.8.0
+'''
+
+EXAMPLES = """
+- name: Create Aggregates and wait 5 minutes until aggregate is online
+ na_ontap_aggregate:
+ state: present
+ service_state: online
+ name: ansibleAggr
+ disk_count: 1
+ wait_for_online: True
+ time_out: 300
+ snaplock_type: non_snaplock
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates
+ na_ontap_aggregate:
+ state: present
+ service_state: offline
+ unmount_volumes: true
+ name: ansibleAggr
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Attach object store
+ na_ontap_aggregate:
+ state: present
+ name: aggr4
+ object_store_name: sgws_305
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename Aggregates
+ na_ontap_aggregate:
+ state: present
+ service_state: online
+ from_name: ansibleAggr
+ name: ansibleAggr2
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete Aggregates
+ na_ontap_aggregate:
+ state: absent
+ service_state: offline
+ unmount_volumes: true
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapAggregate(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ disks=dict(required=False, type='list', elements='str'),
+ disk_count=dict(required=False, type='int', default=None),
+ disk_size=dict(required=False, type='int'),
+ disk_size_with_unit=dict(required=False, type='str'),
+ disk_type=dict(required=False, choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']),
+ from_name=dict(required=False, type='str'),
+ mirror_disks=dict(required=False, type='list', elements='str'),
+ nodes=dict(required=False, type='list', elements='str'),
+ is_mirrored=dict(required=False, type='bool'),
+ raid_size=dict(required=False, type='int'),
+ raid_type=dict(required=False, choices=['raid4', 'raid_dp', 'raid_tec', 'raid_0']),
+ service_state=dict(required=False, choices=['online', 'offline']),
+ spare_pool=dict(required=False, choices=['Pool0', 'Pool1']),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ unmount_volumes=dict(required=False, type='bool'),
+ wait_for_online=dict(required=False, type='bool', default=False),
+ time_out=dict(required=False, type='int', default=100),
+ object_store_name=dict(required=False, type='str'),
+ snaplock_type=dict(required=False, type='str', choices=['compliance', 'enterprise', 'non_snaplock']),
+ ignore_pool_checks=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('service_state', 'offline', ['unmount_volumes']),
+ ],
+ mutually_exclusive=[
+ ('is_mirrored', 'disks'),
+ ('is_mirrored', 'mirror_disks'),
+ ('is_mirrored', 'spare_pool'),
+ ('spare_pool', 'disks'),
+ ('disk_count', 'disks'),
+ ('disk_size', 'disk_size_with_unit')
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.using_vserver_msg = None # This module should be run as cluster admin
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('mirror_disks') is not None and self.parameters.get('disks') is None:
+ self.module.fail_json(msg="mirror_disks require disks options to be set")
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def aggr_get_iter(self, name):
+ """
+ Return aggr-get-iter query results
+ :param name: Name of the aggregate
+ :return: NaElement if aggregate found, None otherwise
+ """
+
+ aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-attributes', **{'aggregate-name': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ aggr_get_iter.add_child_elem(query)
+ result = None
+ try:
+ result = self.server.invoke_successfully(aggr_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 13040 denotes an aggregate not being found.
+ if to_native(error.code) == "13040":
+ pass
+ else:
+ msg = to_native(error)
+ if self.using_vserver_msg is not None:
+ msg += '. Added info: %s.' % self.using_vserver_msg
+ self.module.fail_json(msg=msg, exception=traceback.format_exc())
+ return result
+
+ def get_aggr(self, name=None):
+ """
+ Fetch details if aggregate exists.
+ :param name: Name of the aggregate to be fetched
+ :return:
+ Dictionary of current details if aggregate found
+ None if aggregate is not found
+ """
+ if name is None:
+ name = self.parameters['name']
+ aggr_get = self.aggr_get_iter(name)
+ if (aggr_get and aggr_get.get_child_by_name('num-records') and
+ int(aggr_get.get_child_content('num-records')) >= 1):
+ current_aggr = dict()
+ attr = aggr_get.get_child_by_name('attributes-list').get_child_by_name('aggr-attributes')
+ current_aggr['service_state'] = attr.get_child_by_name('aggr-raid-attributes').get_child_content('state')
+ if attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'):
+ current_aggr['disk_count'] = int(attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'))
+ return current_aggr
+ return None
+
+ def disk_get_iter(self, name):
+ """
+ Return storage-disk-get-iter query results
+ Filter disk list by aggregate name, and only reports disk-name and plex-name
+ :param name: Name of the aggregate
+ :return: NaElement
+ """
+
+ disk_get_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ query_details = {
+ 'query': {
+ 'storage-disk-info': {
+ 'disk-raid-info': {
+ 'disk-aggregate-info': {
+ 'aggregate-name': name
+ }
+ }
+ }
+ }
+ }
+ disk_get_iter.translate_struct(query_details)
+ attributes = {
+ 'desired-attributes': {
+ 'storage-disk-info': {
+ 'disk-name': None,
+ 'disk-raid-info': {
+ 'disk_aggregate_info': {
+ 'plex-name': None
+ }
+ }
+ }
+ }
+ }
+ disk_get_iter.translate_struct(attributes)
+
+ result = None
+ try:
+ result = self.server.invoke_successfully(disk_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ return result
+
+ def get_aggr_disks(self, name):
+ """
+ Fetch disks that are used for this aggregate.
+ :param name: Name of the aggregate to be fetched
+ :return:
+ list of tuples (disk-name, plex-name)
+ empty list if aggregate is not found
+ """
+ disks = list()
+ aggr_get = self.disk_get_iter(name)
+ if (aggr_get and aggr_get.get_child_by_name('num-records') and
+ int(aggr_get.get_child_content('num-records')) >= 1):
+ attr = aggr_get.get_child_by_name('attributes-list')
+ disks = [(disk_info.get_child_content('disk-name'),
+ disk_info.get_child_by_name('disk-raid-info').get_child_by_name('disk-aggregate-info').get_child_content('plex-name'))
+ for disk_info in attr.get_children()]
+ return disks
+
+ def object_store_get_iter(self, name):
+ """
+ Return aggr-object-store-get query results
+ :return: NaElement if object-store for given aggregate found, None otherwise
+ """
+
+ object_store_get_iter = netapp_utils.zapi.NaElement('aggr-object-store-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'object-store-information', **{'object-store-name': self.parameters.get('object_store_name'),
+ 'aggregate': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ object_store_get_iter.add_child_elem(query)
+ result = None
+ try:
+ result = self.server.invoke_successfully(object_store_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ return result
+
+ def get_object_store(self, name):
+ """
+ Fetch details if object store attached to the given aggregate exists.
+ :return:
+ Dictionary of current details if object store attached to the given aggregate is found
+ None if object store is not found
+ """
+ object_store_get = self.object_store_get_iter(name)
+ if (object_store_get and object_store_get.get_child_by_name('num-records') and
+ int(object_store_get.get_child_content('num-records')) >= 1):
+ current_object_store = dict()
+ attr = object_store_get.get_child_by_name('attributes-list').\
+ get_child_by_name('object-store-information')
+ current_object_store['object_store_name'] = attr.get_child_content('object-store-name')
+ return current_object_store
+ return None
+
+ def aggregate_online(self):
+ """
+ Set state of an offline aggregate to online
+ :return: None
+ """
+ online_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-online', **{'aggregate': self.parameters['name'],
+ 'force-online': 'true'})
+ try:
+ self.server.invoke_successfully(online_aggr,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
+ (self.parameters['name'], self.parameters['service_state'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def aggregate_offline(self):
+ """
+ Set state of an online aggregate to offline
+ :return: None
+ """
+ offline_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-offline', **{'aggregate': self.parameters['name'],
+ 'force-offline': 'false',
+ 'unmount-volumes': str(self.parameters['unmount_volumes'])})
+ try:
+ self.server.invoke_successfully(offline_aggr, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
+ (self.parameters['name'], self.parameters['service_state'], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def get_disks_or_mirror_disks_object(name, disks):
+ '''
+ create ZAPI object for disks or mirror_disks
+ '''
+ disks_obj = netapp_utils.zapi.NaElement(name)
+ for disk in disks:
+ disk_info_obj = netapp_utils.zapi.NaElement('disk-info')
+ disk_info_obj.add_new_child('name', disk)
+ disks_obj.add_child_elem(disk_info_obj)
+ return disks_obj
+
+ def create_aggr(self):
+ """
+ Create aggregate
+ :return: None
+ """
+ options = {'aggregate': self.parameters['name']}
+ if self.parameters.get('disk_count'):
+ options['disk-count'] = str(self.parameters['disk_count'])
+ if self.parameters.get('disk_type'):
+ options['disk-type'] = self.parameters['disk_type']
+ if self.parameters.get('raid_size'):
+ options['raid-size'] = str(self.parameters['raid_size'])
+ if self.parameters.get('raid_type'):
+ options['raid-type'] = self.parameters['raid_type']
+ if self.parameters.get('disk_size'):
+ options['disk-size'] = str(self.parameters['disk_size'])
+ if self.parameters.get('disk_size_with_unit'):
+ options['disk-size-with-unit'] = str(self.parameters['disk_size_with_unit'])
+ if self.parameters.get('is_mirrored'):
+ options['is-mirrored'] = str(self.parameters['is_mirrored'])
+ if self.parameters.get('spare_pool'):
+ options['spare-pool'] = self.parameters['spare_pool']
+ if self.parameters.get('raid_type'):
+ options['raid-type'] = self.parameters['raid_type']
+ if self.parameters.get('snaplock_type'):
+ options['snaplock-type'] = self.parameters['snaplock_type']
+ if self.parameters.get('ignore_pool_checks'):
+ options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks'])
+ aggr_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-create', **options)
+ if self.parameters.get('nodes'):
+ nodes_obj = netapp_utils.zapi.NaElement('nodes')
+ aggr_create.add_child_elem(nodes_obj)
+ for node in self.parameters['nodes']:
+ nodes_obj.add_new_child('node-name', node)
+ if self.parameters.get('disks'):
+ aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('disks', self.parameters.get('disks')))
+ if self.parameters.get('mirror_disks'):
+ aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', self.parameters.get('mirror_disks')))
+
+ try:
+ self.server.invoke_successfully(aggr_create, enable_tunneling=False)
+ if self.parameters.get('wait_for_online'):
+ # round off time_out
+ retries = (self.parameters['time_out'] + 5) / 10
+ current = self.get_aggr()
+ status = None if current is None else current['service_state']
+ while status != 'online' and retries > 0:
+ time.sleep(10)
+ retries = retries - 1
+ current = self.get_aggr()
+ status = None if current is None else current['service_state']
+ else:
+ current = self.get_aggr()
+ if current is not None and current.get('disk_count') != self.parameters.get('disk_count'):
+ self.module.exit_json(changed=self.na_helper.changed,
+ warnings="Aggregate created with mismatched disk_count: created %s not %s"
+ % (current.get('disk_count'), self.parameters.get('disk_count')))
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error provisioning aggregate %s: %s"
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_aggr(self):
+ """
+ Delete aggregate.
+ :return: None
+ """
+ aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-destroy', **{'aggregate': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(aggr_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_aggregate(self):
+ """
+ Rename aggregate.
+ """
+ aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-rename', **{'aggregate': self.parameters['from_name'],
+ 'new-aggregate-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(aggr_rename, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error renaming aggregate %s: %s"
+ % (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_aggr(self, modify):
+ """
+ Modify state of the aggregate
+ :param modify: dictionary of parameters to be modified
+ :return: None
+ """
+ if modify.get('service_state') == 'offline':
+ self.aggregate_offline()
+ else:
+ disk_size = 0
+ disk_size_with_unit = None
+ if modify.get('service_state') == 'online':
+ self.aggregate_online()
+ if modify.get('disk_size'):
+ disk_size = modify.get('disk_size')
+ if modify.get('disk_size_with_unit'):
+ disk_size_with_unit = modify.get('disk_size_with_unit')
+ if modify.get('disk_count'):
+ self.add_disks(modify['disk_count'], disk_size=disk_size, disk_size_with_unit=disk_size_with_unit)
+ if modify.get('disks_to_add') or modify.get('mirror_disks_to_add'):
+ self.add_disks(0, modify.get('disks_to_add'), modify.get('mirror_disks_to_add'))
+
+ def attach_object_store_to_aggr(self):
+ """
+ Attach object store to aggregate.
+ :return: None
+ """
+ attach_object_store = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-object-store-attach', **{'aggregate': self.parameters['name'],
+ 'object-store-name': self.parameters['object_store_name']})
+
+ try:
+ self.server.invoke_successfully(attach_object_store,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error attaching object store %s to aggregate %s: %s" %
+ (self.parameters['object_store_name'], self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def add_disks(self, count=0, disks=None, mirror_disks=None, disk_size=0, disk_size_with_unit=None):
+ """
+ Add additional disks to aggregate.
+ :return: None
+ """
+ options = {'aggregate': self.parameters['name']}
+ if count:
+ options['disk-count'] = str(count)
+ if disks and self.parameters.get('ignore_pool_checks'):
+ options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks'])
+ if disk_size:
+ options['disk-size'] = str(disk_size)
+ if disk_size_with_unit:
+ options['disk-size-with-unit'] = disk_size_with_unit
+ aggr_add = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-add', **options)
+ if disks:
+ aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('disks', disks))
+ if mirror_disks:
+ aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', mirror_disks))
+
+ try:
+ self.server.invoke_successfully(aggr_add,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding additional disks to aggregate %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ cserver = netapp_utils.get_cserver(self.server)
+ if cserver is None:
+ server = self.server
+ self.using_vserver_msg = netapp_utils.ERROR_MSG['no_cserver']
+ event_name += ':error_no_cserver'
+ else:
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=cserver)
+ netapp_utils.ems_log_event(event_name, server)
+
+ def map_plex_to_primary_and_mirror(self, plex_disks, disks, mirror_disks):
+ '''
+ we have N plexes, and disks, and maybe mirror_disks
+ we're trying to find which plex is used for disks, and which one, if applicable, for mirror_disks
+ :return: a tuple with the names of the two plexes (disks_plex, mirror_disks_plex)
+ the second one can be None
+ '''
+ disks_plex = None
+ mirror_disks_plex = None
+ error = None
+ for plex in plex_disks:
+ common = set(plex_disks[plex]).intersection(set(disks))
+ if common:
+ if disks_plex is None:
+ disks_plex = plex
+ else:
+ error = 'found overlapping plexes: %s and %s' % (disks_plex, plex)
+ if mirror_disks is not None:
+ common = set(plex_disks[plex]).intersection(set(mirror_disks))
+ if common:
+ if mirror_disks_plex is None:
+ mirror_disks_plex = plex
+ else:
+ error = 'found overlapping mirror plexes: %s and %s' % (mirror_disks_plex, plex)
+ if error is None:
+ # make sure we found a match
+ if disks_plex is None:
+ error = 'cannot not match disks with current aggregate disks'
+ if mirror_disks is not None and mirror_disks_plex is None:
+ if error is not None:
+ error += ', and '
+ error = 'cannot not match mirror_disks with current aggregate disks'
+ if error:
+ self.module.fail_json(msg="Error mapping disks for aggregate %s: %s. Found: %s" %
+ (self.parameters['name'], error, str(plex_disks)))
+ return disks_plex, mirror_disks_plex
+
+ def get_disks_to_add(self, aggr_name, disks, mirror_disks):
+ '''
+ Get list of disks used by the aggregate, as primary and mirror.
+ Report error if:
+ the plexes in use cannot be matched with user inputs (we expect some overlap)
+ the user request requires some disks to be removed (not supported)
+ : return: a tuple of two lists of disks: disks_to_add, mirror_disks_to_add
+ '''
+ # let's see if we need to add disks
+ disks_in_use = self.get_aggr_disks(aggr_name)
+ # we expect a list of tuples (disk_name, plex_name), if there is a mirror, we should have 2 plexes
+ # let's get a list of disks for each plex
+ plex_disks = dict()
+ for disk_name, plex_name in disks_in_use:
+ plex_disks.setdefault(plex_name, []).append(disk_name)
+ # find who is who
+ disks_plex, mirror_disks_plex = self.map_plex_to_primary_and_mirror(plex_disks, disks, mirror_disks)
+ # Now that we know what is which, find what needs to be removed (error), and what needs to be added
+ disks_to_remove = [disk for disk in plex_disks[disks_plex] if disk not in disks]
+ if mirror_disks_plex:
+ disks_to_remove.extend([disk for disk in plex_disks[mirror_disks_plex] if disk not in mirror_disks])
+ if disks_to_remove:
+ error = 'these disks cannot be removed: %s' % str(disks_to_remove)
+ self.module.fail_json(msg="Error removing disks is not supported. Aggregate %s: %s. In use: %s" %
+ (aggr_name, error, str(plex_disks)))
+ # finally, what's to be added
+ disks_to_add = [disk for disk in disks if disk not in plex_disks[disks_plex]]
+ mirror_disks_to_add = list()
+ if mirror_disks_plex:
+ mirror_disks_to_add = [disk for disk in mirror_disks if disk not in plex_disks[mirror_disks_plex]]
+ if mirror_disks_to_add and not disks_to_add:
+ self.module.fail_json(msg="Error cannot add mirror disks %s without adding disks for aggregate %s. In use: %s" %
+ (str(mirror_disks_to_add), aggr_name, str(plex_disks)))
+ if disks_to_add or mirror_disks_to_add:
+ self.na_helper.changed = True
+
+ return disks_to_add, mirror_disks_to_add
+
+ def apply(self):
+ """
+ Apply action to the aggregate
+ :return: None
+ """
+ self.asup_log_for_cserver("na_ontap_aggregate")
+ object_store_cd_action = None
+ aggr_name = self.parameters['name']
+ current = self.get_aggr()
+ # rename and create are mutually exclusive
+ rename, cd_action, object_store_current = None, None, None
+ if self.parameters.get('from_name'):
+ old_aggr = self.get_aggr(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(old_aggr, current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming: aggregate %s does not exist" % self.parameters['from_name'])
+ if rename:
+ current = old_aggr
+ aggr_name = self.parameters['from_name']
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if cd_action is None and self.parameters.get('disks') and current is not None:
+ modify['disks_to_add'], modify['mirror_disks_to_add'] = \
+ self.get_disks_to_add(aggr_name, self.parameters['disks'], self.parameters.get('mirror_disks'))
+
+ if modify.get('disk_count'):
+ if int(modify['disk_count']) < int(current['disk_count']):
+ self.module.fail_json(msg="specified disk_count is less than current disk_count. Only adding_disk is allowed.")
+ else:
+ modify['disk_count'] = modify['disk_count'] - current['disk_count']
+
+ if self.parameters.get('object_store_name'):
+ object_store_current = None
+ if current:
+ object_store_current = self.get_object_store(aggr_name)
+ object_store_cd_action = self.na_helper.get_cd_action(object_store_current, self.parameters.get('object_store_name'))
+ if object_store_cd_action is None and object_store_current is not None\
+ and object_store_current['object_store_name'] != self.parameters.get('object_store_name'):
+ self.module.fail_json(msg='Error: object store %s is already associated with aggregate %s.' %
+ (object_store_current['object_store_name'], aggr_name))
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_aggr()
+ elif cd_action == 'delete':
+ self.delete_aggr()
+ else:
+ if rename:
+ self.rename_aggregate()
+ if modify:
+ self.modify_aggr(modify)
+ if object_store_cd_action == 'create':
+ self.attach_object_store_to_aggr()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create Aggregate class instance and invoke apply
+ :return: None
+ """
+ obj_aggr = NetAppOntapAggregate()
+ obj_aggr.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py
new file mode 100644
index 00000000..b5c37dc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+"""
+create Autosupport module to enable, disable or modify
+"""
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Enable/Disable Autosupport"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_autosupport
+options:
+ state:
+ description:
+ - Specifies whether the AutoSupport daemon is present or absent.
+ - When this setting is absent, delivery of all AutoSupport messages is turned off.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ node_name:
+ description:
+ - The name of the filer that owns the AutoSupport Configuration.
+ required: true
+ type: str
+ transport:
+ description:
+ - The name of the transport protocol used to deliver AutoSupport messages
+ choices: ['http', 'https', 'smtp']
+ type: str
+ noteto:
+ description:
+ - Specifies up to five recipients of short AutoSupport e-mail messages.
+ elements: str
+ type: list
+ post_url:
+ description:
+ - The URL used to deliver AutoSupport messages via HTTP POST
+ type: str
+ mail_hosts:
+ description:
+ - List of mail server(s) used to deliver AutoSupport messages via SMTP.
+ - Both host names and IP addresses may be used as valid input.
+ type: list
+ elements: str
+ support:
+ description:
+ - Specifies whether AutoSupport notification to technical support is enabled.
+ type: bool
+ from_address:
+ description:
+ - specify the e-mail address from which the node sends AutoSupport messages
+ version_added: 2.8.0
+ type: str
+ partner_addresses:
+ description:
+ - Specifies up to five partner vendor recipients of full AutoSupport e-mail messages.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ to_addresses:
+ description:
+ - Specifies up to five recipients of full AutoSupport e-mail messages.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ proxy_url:
+ description:
+ - specify an HTTP or HTTPS proxy if the 'transport' parameter is set to HTTP or HTTPS and your organization uses a proxy.
+ - If authentication is required, use the format "username:password@host:port".
+ version_added: 2.8.0
+ type: str
+ hostname_in_subject:
+ description:
+ - Specify whether the hostname of the node is included in the subject line of the AutoSupport message.
+ type: bool
+ version_added: 2.8.0
+short_description: NetApp ONTAP Autosupport
+version_added: 2.7.0
+
+"""
+
+EXAMPLES = """
+ - name: Enable autosupport
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ noteto: abc@def.com,def@ghi.com
+ mail_hosts: 1.2.3.4,5.6.7.8
+ support: False
+ post_url: url/1.0/post
+
+ - name: Modify autosupport proxy_url with password
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ proxy_url: username:password@host.com:8000
+
+ - name: Modify autosupport proxy_url without password
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ proxy_url: username@host.com:8000
+
+ - name: Disable autosupport
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ node_name: test
+
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPasup(object):
+    """Enable, disable or modify the AutoSupport configuration of a node via ZAPI."""
+
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            node_name=dict(required=True, type='str'),
+            transport=dict(required=False, type='str', choices=['smtp', 'http', 'https']),
+            noteto=dict(required=False, type='list', elements='str'),
+            post_url=dict(required=False, type='str'),
+            support=dict(required=False, type='bool'),
+            mail_hosts=dict(required=False, type='list', elements='str'),
+            from_address=dict(required=False, type='str'),
+            partner_addresses=dict(required=False, type='list', elements='str'),
+            to_addresses=dict(required=False, type='list', elements='str'),
+            proxy_url=dict(required=False, type='str'),
+            hostname_in_subject=dict(required=False, type='bool'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=False
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # present or absent requires modifying state to enabled or disabled
+        self.parameters['service_state'] = 'started' if self.parameters['state'] == 'present' else 'stopped'
+        self.set_playbook_zapi_key_map()
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def set_playbook_zapi_key_map(self):
+        """Map playbook option names to ZAPI element names, grouped by value type (string/list/bool)."""
+        self.na_helper.zapi_string_keys = {
+            'node_name': 'node-name',
+            'transport': 'transport',
+            'post_url': 'post-url',
+            'from_address': 'from',
+            'proxy_url': 'proxy-url'
+        }
+        # list entries are (zapi-parent-element, zapi-child-element) pairs
+        self.na_helper.zapi_list_keys = {
+            'noteto': ('noteto', 'mail-address'),
+            'mail_hosts': ('mail-hosts', 'string'),
+            'partner_addresses': ('partner-address', 'mail-address'),
+            'to_addresses': ('to', 'mail-address'),
+        }
+        self.na_helper.zapi_bool_keys = {
+            'support': 'is-support-enabled',
+            'hostname_in_subject': 'is-node-in-subject'
+        }
+
+    def get_autosupport_config(self):
+        """
+        Invoke zapi - get current autosupport details
+        :return: dict() of current settings, keyed by playbook option names
+        """
+        asup_details = netapp_utils.zapi.NaElement('autosupport-config-get')
+        asup_details.add_new_child('node-name', self.parameters['node_name'])
+        asup_info = dict()
+        try:
+            result = self.server.invoke_successfully(asup_details, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='%s' % to_native(error),
+                                  exception=traceback.format_exc())
+        # zapi invoke successful
+        asup_attr_info = result.get_child_by_name('attributes').get_child_by_name('autosupport-config-info')
+        # translate the on/off flag back into the started/stopped convention used in parameters
+        asup_info['service_state'] = 'started' if asup_attr_info['is-enabled'] == 'true' else 'stopped'
+        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+            asup_info[item_key] = asup_attr_info[zapi_key]
+        for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+            asup_info[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+                                                                    value=asup_attr_info[zapi_key])
+        for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
+            parent, dummy = zapi_key
+            asup_info[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
+                                                                    zapi_parent=asup_attr_info.get_child_by_name(parent)
+                                                                    )
+        return asup_info
+
+    def modify_autosupport_config(self, modify):
+        """
+        Invoke zapi - modify autosupport config
+        :param modify: dict of attributes to change, keyed by playbook option names
+        @return: NaElement object / FAILURE with an error_message
+        """
+        asup_details = {'node-name': self.parameters['node_name']}
+        if modify.get('service_state'):
+            asup_details['is-enabled'] = 'true' if modify.get('service_state') == 'started' else 'false'
+        asup_config = netapp_utils.zapi.NaElement('autosupport-config-modify')
+        # convert each changed attribute to its ZAPI representation by value type
+        for item_key in modify:
+            if item_key in self.na_helper.zapi_string_keys:
+                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
+                asup_details[zapi_key] = modify[item_key]
+            elif item_key in self.na_helper.zapi_bool_keys:
+                zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
+                asup_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+                                                                           value=modify[item_key])
+            elif item_key in self.na_helper.zapi_list_keys:
+                parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
+                asup_config.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+                                                                             zapi_parent=parent_key,
+                                                                             zapi_child=child_key,
+                                                                             data=modify.get(item_key)))
+        asup_config.translate_struct(asup_details)
+        try:
+            return self.server.invoke_successfully(asup_config, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='%s' % to_native(error), exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        """Send an EMS log event for this module against the cluster admin vserver."""
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event("na_ontap_autosupport", cserver)
+
+    def apply(self):
+        """
+        Apply action to autosupport
+        """
+        current = self.get_autosupport_config()
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed:
+            # NOTE(review): __init__ sets supports_check_mode=False, so check_mode
+            # is always False here; the branch appears to be kept for symmetry only.
+            if self.module.check_mode:
+                pass
+            else:
+                self.modify_autosupport_config(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """Instantiate the autosupport module object and apply the requested state."""
+    asup_obj = NetAppONTAPasup()
+    asup_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
new file mode 100644
index 00000000..94be8ce5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_dns
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_autosupport_invoke
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP send AutoSupport message
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Send an AutoSupport message from a node
+
+options:
+
+ name:
+ description:
+ - The name of the node to send the message to.
+ - Not specifying this option invokes AutoSupport on all nodes in the cluster.
+ type: str
+
+ autosupport_message:
+ description:
+ - Text sent in the subject line of the AutoSupport message.
+ type: str
+ aliases:
+ - message
+ version_added: 20.8.0
+
+ type:
+ description:
+ - Type of AutoSupport Collection to Issue.
+ choices: ['test', 'performance', 'all']
+ default: 'all'
+ type: str
+
+ uri:
+ description:
+ - send the AutoSupport message to the destination you specify instead of the configured destination.
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Send message
+ na_ontap_autosupport_invoke:
+ name: node1
+ message: invoked test autosupport rest
+ uri: http://1.2.3.4/delivery_uri
+ type: test
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPasupInvoke(object):
+    ''' send ASUP message from one node or all nodes, via REST when available, else ZAPI '''
+    def __init__(self):
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=False, type='str'),
+            autosupport_message=dict(required=False, type='str', aliases=["message"]),
+            type=dict(required=False, choices=[
+                'test', 'performance', 'all'], default='all'),
+            uri=dict(required=False, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # REST API should be used for ONTAP 9.6 or higher.
+        self.rest_api = OntapRestAPI(self.module)
+        if self.rest_api.is_rest():
+            self.use_rest = True
+        else:
+            # fall back to ZAPI, which requires the NetApp-Lib python package
+            if not HAS_NETAPP_LIB:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_nodes(self):
+        """Return the list of node names in the cluster (ZAPI system-node-get-iter)."""
+        nodes = list()
+        node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
+        desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+        node_details_info = netapp_utils.zapi.NaElement('node-details-info')
+        node_details_info.add_new_child('node', '')
+        desired_attributes.add_child_elem(node_details_info)
+        node_obj.add_child_elem(desired_attributes)
+        try:
+            result = self.server.invoke_successfully(node_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) > 0:
+            node_info = result.get_child_by_name('attributes-list')
+            if node_info is not None:
+                nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
+        return nodes
+
+    def send_zapi_message(self, params, node_name):
+        """Invoke autosupport-invoke for a single node; fails the module on ZAPI error."""
+        params['node-name'] = node_name
+        send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
+        try:
+            self.server.invoke_successfully(send_message, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
+                                      % (node_name, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def send_message(self):
+        """Build the message payload and deliver it via REST or, per node, via ZAPI."""
+        params = dict()
+        if self.parameters.get('autosupport_message'):
+            params['message'] = self.parameters['autosupport_message']
+        if self.parameters.get('type'):
+            params['type'] = self.parameters['type']
+        if self.parameters.get('uri'):
+            params['uri'] = self.parameters['uri']
+
+        if self.use_rest:
+            if self.parameters.get('name'):
+                params['node.name'] = self.parameters['name']
+                node_name = params['node.name']
+            else:
+                # '*' is only used for the error message; REST targets all nodes when
+                # node.name is omitted from the POST body
+                node_name = '*'
+            api = 'support/autosupport/messages'
+            dummy, error = self.rest_api.post(api, params)
+            if error is not None:
+                self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
+                                          % (node_name, error))
+        else:
+            if self.parameters.get('name'):
+                node_names = [self.parameters['name']]
+            else:
+                # simulate REST behavior by sending to all nodes in the cluster
+                node_names = self.get_nodes()
+            for name in node_names:
+                self.send_zapi_message(params, name)
+
+    def ems_log_event(self):
+        """Send an EMS log event for this module against the cluster admin vserver (ZAPI only)."""
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        return netapp_utils.ems_log_event("na_ontap_autosupport_invoke", cserver)
+
+    def apply(self):
+        """Send the AutoSupport message unless running in check mode; always reports changed."""
+        if not self.use_rest:
+            self.ems_log_event()
+        if self.module.check_mode:
+            pass
+        else:
+            self.send_message()
+        self.module.exit_json(changed=True)
+
+
+def main():
+    """Instantiate the module object and send the AutoSupport message."""
+    message = NetAppONTAPasupInvoke()
+    message.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py
new file mode 100644
index 00000000..646ba410
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_broadcast_domain
+short_description: NetApp ONTAP manage broadcast domains.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify a ONTAP broadcast domain.
+options:
+ state:
+ description:
+ - Whether the specified broadcast domain should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Specify the broadcast domain name.
+ required: true
+ aliases:
+ - broadcast_domain
+ type: str
+ from_name:
+ description:
+ - Specify the broadcast domain name to be split into new broadcast domain.
+ version_added: 2.8.0
+ type: str
+ mtu:
+ description:
+ - Specify the required mtu for the broadcast domain.
+ type: str
+ ipspace:
+ description:
+ - Specify the required ipspace for the broadcast domain.
+ - A domain ipspace can not be modified after the domain has been created.
+ type: str
+ ports:
+ description:
+ - Specify the ports associated with this broadcast domain. Should be comma separated.
+ - It represents the expected state of a list of ports at any time.
+ - Add a port if it is specified in expected state but not in current state.
+ - Delete a port if it is specified in current state but not in expected state.
+ - For split action, it represents the ports to be split from current broadcast domain and added to the new broadcast domain.
+ - if all ports are removed or split from a broadcast domain, the broadcast domain will be deleted automatically.
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: create broadcast domain
+ na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1000
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
+ - name: modify broadcast domain
+ na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1100
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
+ - name: split broadcast domain
+ na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_name: ansible_domain
+ name: new_ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: khutton-vsim1:e0d-12
+ - name: delete broadcast domain
+ na_ontap_broadcast_domain:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ ipspace: Default
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapBroadcastDomain(object):
+    """
+    Create, Modifies and Destroys a Broadcast domain
+    """
+    def __init__(self):
+        """
+        Initialize the ONTAP Broadcast Domain class
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str', aliases=["broadcast_domain"]),
+            ipspace=dict(required=False, type='str'),
+            # mtu is handled as a string and passed through to ZAPI unchanged
+            mtu=dict(required=False, type='str'),
+            ports=dict(required=False, type='list', elements='str'),
+            from_name=dict(required=False, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        return
+
+    def get_broadcast_domain(self, broadcast_domain=None):
+        """
+        Return details about the broadcast domain
+        :param broadcast_domain: specific broadcast domain to get. Defaults to self.parameters['name'].
+        :return: Details about the broadcast domain. None if not found.
+        :rtype: dict
+        """
+        if broadcast_domain is None:
+            broadcast_domain = self.parameters['name']
+        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+        broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(broadcast_domain_info)
+        domain_get_iter.add_child_elem(query)
+        result = self.server.invoke_successfully(domain_get_iter, True)
+        domain_exists = None
+        # check if broadcast_domain exists
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) == 1:
+            domain_info = result.get_child_by_name('attributes-list').\
+                get_child_by_name('net-port-broadcast-domain-info')
+            domain_name = domain_info.get_child_content('broadcast-domain')
+            domain_mtu = domain_info.get_child_content('mtu')
+            domain_ipspace = domain_info.get_child_content('ipspace')
+            domain_ports = domain_info.get_child_by_name('ports')
+            if domain_ports is not None:
+                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+            else:
+                ports = []
+            domain_exists = {
+                'domain-name': domain_name,
+                'mtu': domain_mtu,
+                'ipspace': domain_ipspace,
+                'ports': ports
+            }
+        return domain_exists
+
+    def create_broadcast_domain(self):
+        """
+        Creates a new broadcast domain
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-create')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        if self.parameters.get('mtu'):
+            domain_obj.add_new_child("mtu", self.parameters['mtu'])
+        if self.parameters.get('ports'):
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in self.parameters['ports']:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating broadcast domain %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_broadcast_domain(self, broadcast_domain=None):
+        """
+        Deletes a broadcast domain
+        :param broadcast_domain: name of the domain to delete. Defaults to self.parameters['name'].
+        """
+        if broadcast_domain is None:
+            broadcast_domain = self.parameters['name']
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-destroy')
+        domain_obj.add_new_child("broadcast-domain", broadcast_domain)
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting broadcast domain %s: %s' %
+                                  (broadcast_domain, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_broadcast_domain(self):
+        """
+        Modifies ipspace and mtu options of a broadcast domain
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-modify')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+        if self.parameters.get('mtu'):
+            domain_obj.add_new_child("mtu", self.parameters['mtu'])
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying broadcast domain %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def split_broadcast_domain(self):
+        """
+        split broadcast domain: move self.parameters['ports'] from 'from_name' into
+        a new domain 'name'; delete the source domain if no ports remain in it.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-split')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['from_name'])
+        domain_obj.add_new_child("new-broadcast-domain", self.parameters['name'])
+        if self.parameters.get('ports'):
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in self.parameters['ports']:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error splitting broadcast domain %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        # the source domain is deleted automatically once it has no ports left
+        if len(self.get_broadcast_domain_ports(self.parameters['from_name'])) == 0:
+            self.delete_broadcast_domain(self.parameters['from_name'])
+
+    def modify_redirect(self, modify):
+        """
+        Dispatch each modified attribute to the matching modify method.
+        :param modify: modify attributes.
+        """
+        for attribute in modify.keys():
+            if attribute == 'mtu':
+                self.modify_broadcast_domain()
+            if attribute == 'ports':
+                self.modify_broadcast_domain_ports()
+
+    def get_modify_attributes(self, current, split):
+        """
+        :param current: current state.
+        :param split: True or False of split action.
+        :return: list of modified attributes. None when state is 'absent'.
+        """
+        modify = None
+        if self.parameters['state'] == 'present':
+            # split already handled ipspace and ports.
+            if self.parameters.get('from_name'):
+                current = self.get_broadcast_domain(self.parameters['from_name'])
+                if split:
+                    modify = self.na_helper.get_modified_attributes(current, self.parameters)
+                    if modify.get('ipspace'):
+                        del modify['ipspace']
+                    if modify.get('ports'):
+                        del modify['ports']
+            # ipspace can not be modified.
+            else:
+                modify = self.na_helper.get_modified_attributes(current, self.parameters)
+                if modify.get('ipspace'):
+                    self.module.fail_json(msg='A domain ipspace can not be modified after the domain has been created.',
+                                          exception=traceback.format_exc())
+        return modify
+
+    def modify_broadcast_domain_ports(self):
+        """
+        compare current and desire ports. Call add or remove ports methods if needed.
+        :return: None.
+        """
+        current_ports = self.get_broadcast_domain_ports()
+        expect_ports = self.parameters['ports']
+        # if want to remove all ports, simply delete the broadcast domain.
+        if len(expect_ports) == 0:
+            self.delete_broadcast_domain()
+            return
+        ports_to_remove = list(set(current_ports) - set(expect_ports))
+        ports_to_add = list(set(expect_ports) - set(current_ports))
+
+        if len(ports_to_add) > 0:
+            self.add_broadcast_domain_ports(ports_to_add)
+
+        if len(ports_to_remove) > 0:
+            self.delete_broadcast_domain_ports(ports_to_remove)
+
+    def add_broadcast_domain_ports(self, ports):
+        """
+        Creates new broadcast domain ports
+        :param ports: ports to be added.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        if ports:
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in ports:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+            return True
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_broadcast_domain_ports(self, ports):
+        """
+        Deletes broadcast domain ports
+        :param: ports to be deleted.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        if ports:
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in ports:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+            return True
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_broadcast_domain_ports(self, broadcast_domain=None):
+        """
+        Return details about the broadcast domain ports.
+        :param broadcast_domain: domain to query. Defaults to self.parameters['name'].
+        :return: Details about the broadcast domain ports. None if not found.
+        :rtype: list
+        """
+        if broadcast_domain is None:
+            broadcast_domain = self.parameters['name']
+        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+        broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(broadcast_domain_info)
+        domain_get_iter.add_child_elem(query)
+        result = self.server.invoke_successfully(domain_get_iter, True)
+        ports = []
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) == 1:
+            domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+            domain_ports = domain_info.get_child_by_name('ports')
+            if domain_ports is not None:
+                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+        return ports
+
+    def apply(self):
+        """
+        Run Module based on play book
+        """
+        self.asup_log_for_cserver("na_ontap_broadcast_domain")
+        current = self.get_broadcast_domain()
+        cd_action, split = None, None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action == 'create':
+            # either create new domain or split domain.
+            if self.parameters.get('from_name'):
+                split = self.na_helper.is_rename_action(self.get_broadcast_domain(self.parameters['from_name']), current)
+                if split is None:
+                    self.module.fail_json(msg='A domain can not be split if it does not exist.',
+                                          exception=traceback.format_exc())
+                if split:
+                    cd_action = None
+        # modify may be None when deleting; the 'elif modify' guard below handles that
+        modify = self.get_modify_attributes(current, split)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if split:
+                    self.split_broadcast_domain()
+                if cd_action == 'create':
+                    self.create_broadcast_domain()
+                elif cd_action == 'delete':
+                    self.delete_broadcast_domain()
+                elif modify:
+                    self.modify_redirect(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+    def asup_log_for_cserver(self, event_name):
+        """
+        Fetch admin vserver for the given cluster
+        Create and Autosupport log event with the given module name
+        :param event_name: Name of the event log
+        :return: None
+        """
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+    """
+    Creates the NetApp ONTAP Broadcast Domain Object that can be created, deleted and modified.
+    """
+    obj = NetAppOntapBroadcastDomain()
+    obj.apply()
+
+
+# standard Ansible module entry point
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py
new file mode 100644
index 00000000..6a67beb5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_broadcast_domain_ports
+short_description: NetApp ONTAP manage broadcast domain ports
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add or remove ONTAP broadcast domain ports. Existing ports that are not listed are kept.
+options:
+ state:
+ description:
+ - Whether the specified broadcast domain should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ broadcast_domain:
+ description:
+ - Specify the broadcast_domain name
+ required: true
+ type: str
+ ipspace:
+ description:
+ - Specify the ipspace for the broadcast domain
+ type: str
+ ports:
+ description:
+ - Specify the list of ports to add to or remove from this broadcast domain.
+ required: true
+ type: list
+ elements: str
+
+'''
+
+EXAMPLES = """
+ - name: create broadcast domain ports
+ na_ontap_broadcast_domain_ports:
+ state=present
+ username={{ netapp_username }}
+ password={{ netapp_password }}
+ hostname={{ netapp_hostname }}
+ broadcast_domain=123kevin
+ ports=khutton-vsim1:e0d-13
+ - name: delete broadcast domain ports
+ na_ontap_broadcast_domain_ports:
+ state=absent
+ username={{ netapp_username }}
+ password={{ netapp_password }}
+ hostname={{ netapp_hostname }}
+ broadcast_domain=123kevin
+ ports=khutton-vsim1:e0d-13
+"""
+
+RETURN = """
+
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapBroadcastDomainPorts(object):
    """
    Add ports to, or remove ports from, an existing ONTAP broadcast domain.

    The broadcast domain itself must already exist; this module only manages
    its port membership. Existing ports that are not listed are kept.
    """
    def __init__(self):
        """
        Define the argument spec, parse module parameters and open a
        cluster-scoped ZAPI connection.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            broadcast_domain=dict(required=True, type='str'),
            ipspace=dict(required=False, type='str', default=None),
            ports=dict(required=True, type='list', elements='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        parameters = self.module.params
        # set up state variables
        self.state = parameters['state']
        self.broadcast_domain = parameters['broadcast_domain']
        self.ipspace = parameters['ipspace']
        self.ports = parameters['ports']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
        return

    def get_broadcast_domain_ports(self):
        """
        Return details about the broadcast domain and its current ports.

        :return: dict with keys 'domain-name' and 'ports' (list of
                 node:port strings, possibly empty) when exactly one
                 matching domain is found, otherwise None.
        :rtype: dict or None
        """
        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
        broadcast_domain_info.add_new_child('broadcast-domain', self.broadcast_domain)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(broadcast_domain_info)
        domain_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(domain_get_iter, True)
        domain_exists = None
        # check if broadcast domain exists
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
            domain_name = domain_info.get_child_content('broadcast-domain')
            domain_ports = domain_info.get_child_by_name('ports')
            # a domain with no member ports has no 'ports' element at all
            if domain_ports is not None:
                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
            else:
                ports = []
            domain_exists = {
                'domain-name': domain_name,
                'ports': ports
            }
        return domain_exists

    def _change_broadcast_domain_ports(self, zapi_name, ports, action):
        """
        Invoke a ZAPI that adds or removes ports on the broadcast domain.

        Shared implementation for create/delete: the two operations build
        an identical request and differ only in the ZAPI call name.
        :param zapi_name: ZAPI API name to invoke
        :param ports: list of node:port names to send
        :param action: verb ('creating' or 'deleting') used in error messages
        """
        domain_obj = netapp_utils.zapi.NaElement(zapi_name)
        domain_obj.add_new_child("broadcast-domain", self.broadcast_domain)
        if self.ipspace:
            domain_obj.add_new_child("ipspace", self.ipspace)
        if ports:
            ports_obj = netapp_utils.zapi.NaElement('ports')
            domain_obj.add_child_elem(ports_obj)
            for port in ports:
                ports_obj.add_new_child('net-qualified-port-name', port)
        try:
            self.server.invoke_successfully(domain_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error %s port for broadcast domain %s: %s' %
                                  (action, self.broadcast_domain, to_native(error)),
                                  exception=traceback.format_exc())

    def create_broadcast_domain_ports(self, ports):
        """
        Add the given ports to the broadcast domain.
        """
        self._change_broadcast_domain_ports('net-port-broadcast-domain-add-ports', ports, 'creating')

    def delete_broadcast_domain_ports(self, ports):
        """
        Remove the given ports from the broadcast domain.
        """
        self._change_broadcast_domain_ports('net-port-broadcast-domain-remove-ports', ports, 'deleting')

    def apply(self):
        """
        Run the module: compute the port delta and apply it (unless in
        check mode). Fails if the broadcast domain does not exist.
        """
        changed = False
        broadcast_domain_details = self.get_broadcast_domain_ports()
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_broadcast_domain_ports", cserver)
        if broadcast_domain_details is None:
            self.module.fail_json(msg='Error broadcast domain not found: %s' % self.broadcast_domain)
        if self.state == 'present':  # execute create
            # only add ports that are not already members
            ports_to_add = [port for port in self.ports if port not in broadcast_domain_details['ports']]
            if len(ports_to_add) > 0:
                if not self.module.check_mode:
                    self.create_broadcast_domain_ports(ports_to_add)
                changed = True
        elif self.state == 'absent':  # execute delete
            # only remove ports that are currently members
            ports_to_delete = [port for port in self.ports if port in broadcast_domain_details['ports']]
            if len(ports_to_delete) > 0:
                if not self.module.check_mode:
                    self.delete_broadcast_domain_ports(ports_to_delete)
                changed = True

        self.module.exit_json(changed=changed)
+
+
def main():
    """Entry point: run the broadcast domain ports module."""
    ports_module = NetAppOntapBroadcastDomainPorts()
    ports_module.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py
new file mode 100644
index 00000000..bf0c035e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP manage consistency group snapshot
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create consistency group snapshot for ONTAP volumes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cg_snapshot
+options:
+ state:
+ description:
+ - If you want to create a snapshot.
+ default: present
+ type: str
+ vserver:
+ required: true
+ type: str
+ description:
+ - Name of the vserver.
+ volumes:
+ required: true
+ type: list
+ elements: str
+ description:
+ - A list of volumes in this filer that is part of this CG operation.
+ snapshot:
+ required: true
+ type: str
+ description:
+ - The provided name of the snapshot that is created in each volume.
+ timeout:
+ description:
+ - Timeout selector.
+ choices: ['urgent', 'medium', 'relaxed']
+ type: str
+ default: medium
+ snapmirror_label:
+ description:
+ - A human readable SnapMirror label to be attached with the consistency group snapshot copies.
+ type: str
+version_added: 2.7.0
+
+'''
+
+EXAMPLES = """
+ - name:
+ na_ontap_cg_snapshot:
+ state: present
+ vserver: vserver_name
+ snapshot: snapshot name
+ volumes: vol_name
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPCGSnapshot(object):
    """
    Create consistency group (CG) snapshots across a set of volumes.

    Uses the two-phase cg-start / cg-commit ZAPI sequence so that all
    volume snapshots are taken as one consistent group.
    """

    def __init__(self):
        """
        Define the argument spec, read module parameters and open a
        vserver-scoped ZAPI connection.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', default='present'),
            vserver=dict(required=True, type='str'),
            volumes=dict(required=True, type='list', elements='str'),
            snapshot=dict(required=True, type='str'),
            timeout=dict(required=False, type='str', choices=[
                'urgent', 'medium', 'relaxed'], default='medium'),
            snapmirror_label=dict(required=False, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=False
        )

        parameters = self.module.params

        # set up variables
        self.state = parameters['state']
        self.vserver = parameters['vserver']
        self.volumes = parameters['volumes']
        self.snapshot = parameters['snapshot']
        self.timeout = parameters['timeout']
        self.snapmirror_label = parameters['snapmirror_label']
        # populated by cg_start, consumed by cg_commit
        self.cgid = None

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(
                module=self.module, vserver=self.vserver)

    def does_snapshot_exist(self, volume):
        """
        This is duplicated from na_ontap_snapshot
        Check whether the snapshot already exists on the given volume.
        :return: dict with the snapshot 'comment' if the snapshot exists,
                 None if it doesn't
        """
        # TODO: Remove this method and import snapshot module and
        # call get after re-factoring __init__ across all the modules
        # we aren't importing now, since __init__ does a lot of Ansible setup
        snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
        desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
        snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
        comment = netapp_utils.zapi.NaElement('comment')
        # add more desired attributes that are allowed to be modified
        snapshot_info.add_child_elem(comment)
        desired_attr.add_child_elem(snapshot_info)
        snapshot_obj.add_child_elem(desired_attr)
        # compose query
        query = netapp_utils.zapi.NaElement("query")
        snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
        snapshot_info_obj.add_new_child("name", self.snapshot)
        snapshot_info_obj.add_new_child("volume", volume)
        snapshot_info_obj.add_new_child("vserver", self.vserver)
        query.add_child_elem(snapshot_info_obj)
        snapshot_obj.add_child_elem(query)
        result = self.server.invoke_successfully(snapshot_obj, True)
        return_value = None
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            attributes_list = result.get_child_by_name('attributes-list')
            snap_info = attributes_list.get_child_by_name('snapshot-info')
            return_value = {'comment': snap_info.get_child_content('comment')}
        return return_value

    def cgcreate(self):
        """
        Calls cg-start and cg-commit (when cg-start succeeds)
        :return: True if a CG snapshot was started (i.e. something changed)
        """
        started = self.cg_start()
        if started:
            if self.cgid is not None:
                self.cg_commit()
            else:
                # cg-start succeeded but returned no cg-id: cannot commit
                self.module.fail_json(msg="Error fetching CG ID for CG commit %s" % self.snapshot,
                                      exception=traceback.format_exc())
        return started

    def cg_start(self):
        """
        For the given list of volumes, creates cg-snapshot
        Only volumes that do not already hold the snapshot are included;
        if none qualify, no ZAPI call is made and False is returned.
        """
        snapshot_started = False
        cgstart = netapp_utils.zapi.NaElement("cg-start")
        cgstart.add_new_child("snapshot", self.snapshot)
        cgstart.add_new_child("timeout", self.timeout)
        volume_list = netapp_utils.zapi.NaElement("volumes")
        cgstart.add_child_elem(volume_list)
        for vol in self.volumes:
            snapshot_exists = self.does_snapshot_exist(vol)
            if snapshot_exists is None:
                snapshot_started = True
                volume_list.add_new_child("volume-name", vol)
        if snapshot_started:
            if self.snapmirror_label:
                cgstart.add_new_child("snapmirror-label",
                                      self.snapmirror_label)
            try:
                cgresult = self.server.invoke_successfully(
                    cgstart, enable_tunneling=True)
                if cgresult.get_child_by_name('cg-id'):
                    self.cgid = cgresult['cg-id']
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error creating CG snapshot %s: %s" %
                                      (self.snapshot, to_native(error)),
                                      exception=traceback.format_exc())
        return snapshot_started

    def cg_commit(self):
        """
        When cg-start is successful, performs a cg-commit with the cg-id
        """
        cgcommit = netapp_utils.zapi.NaElement.create_node_with_children(
            'cg-commit', **{'cg-id': self.cgid})
        try:
            self.server.invoke_successfully(cgcommit,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error committing CG snapshot %s: %s" %
                                  (self.snapshot, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Applies action from playbook'''
        netapp_utils.ems_log_event("na_ontap_cg_snapshot", self.server)
        # initialize so 'changed' is always bound, even if check mode is
        # ever enabled for this module (it is currently unsupported)
        changed = False
        if not self.module.check_mode:
            changed = self.cgcreate()
        self.module.exit_json(changed=changed)
+
+
def main():
    '''Entry point: run the consistency group snapshot module.'''
    snapshot_module = NetAppONTAPCGSnapshot()
    snapshot_module.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py
new file mode 100644
index 00000000..dbf565a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# import untangle
+
+'''
+na_ontap_cifs
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create or destroy or modify(path) cifs-share on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cifs
+
+options:
+
+ path:
+ description:
+ The file system path that is shared through this CIFS share. The path is the full, user visible path relative
+ to the vserver root, and it might be crossing junction mount points. The path is in UTF8 and uses forward
+ slash as directory separator
+ required: false
+ type: str
+
+ vserver:
+ description:
+ - "Vserver containing the CIFS share."
+ required: true
+ type: str
+
+ share_name:
+ description:
+ The name of the CIFS share. The CIFS share name is a UTF-8 string with the following characters being
+ illegal; control characters from 0x00 to 0x1F, both inclusive, 0x22 (double quotes)
+ required: true
+ type: str
+
+ share_properties:
+ description:
+ - The list of properties for the CIFS share
+ required: false
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ symlink_properties:
+ description:
+ - The list of symlink properties for this CIFS share
+ required: false
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified CIFS share should exist or not."
+ required: false
+ type: str
+ default: present
+
+ vscan_fileop_profile:
+ choices: ['no_scan', 'standard', 'strict', 'writes_only']
+ description:
+ - Profile_set of file_ops to which vscan on access scanning is applicable.
+ required: false
+ type: str
+ version_added: 2.9.0
+
+short_description: NetApp ONTAP Manage cifs-share
+version_added: 2.6.0
+
+'''
+
+EXAMPLES = """
+ - name: Create CIFS share
+ na_ontap_cifs:
+ state: present
+ share_name: cifsShareName
+ path: /
+ vserver: vserverName
+ share_properties: browsable,oplocks
+ symlink_properties: read_only,enable
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete CIFS share
+ na_ontap_cifs:
+ state: absent
+ share_name: cifsShareName
+ vserver: vserverName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify path CIFS share
+ na_ontap_cifs:
+ state: present
+ share_name: pb_test
+ vserver: vserverName
+ path: /
+ share_properties: show_previous_versions
+ symlink_properties: disable
+ vscan_fileop_profile: no_scan
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPCifsShare(object):
    """
    Create, delete or modify a CIFS share on a vserver.

    Modification covers path, share properties, symlink properties and
    the vscan file-op profile.
    """

    def __init__(self):
        """
        Define the argument spec, read parameters and open a
        vserver-scoped ZAPI connection.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            share_name=dict(required=True, type='str'),
            path=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            share_properties=dict(required=False, type='list', elements='str'),
            symlink_properties=dict(required=False, type='list', elements='str'),
            vscan_fileop_profile=dict(required=False, type='str', choices=['no_scan', 'standard', 'strict', 'writes_only'])
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(
                module=self.module, vserver=self.parameters.get('vserver'))

    def get_cifs_share(self):
        """
        Return details about the cifs-share
        :param:
            name : Name of the cifs-share
        :return: Details about the cifs-share. None if not found.
        :rtype: dict
        """
        cifs_iter = netapp_utils.zapi.NaElement('cifs-share-get-iter')
        cifs_info = netapp_utils.zapi.NaElement('cifs-share')
        cifs_info.add_new_child('share-name', self.parameters.get('share_name'))
        cifs_info.add_new_child('vserver', self.parameters.get('vserver'))

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_info)

        cifs_iter.add_child_elem(query)

        result = self.server.invoke_successfully(cifs_iter, True)

        return_value = None
        # check if query returns the expected cifs-share
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            properties_list = []
            symlink_list = []
            cifs_attrs = result.get_child_by_name('attributes-list').\
                get_child_by_name('cifs-share')
            if cifs_attrs.get_child_by_name('share-properties'):
                properties_attrs = cifs_attrs['share-properties']
                if properties_attrs is not None:
                    # 'prop' rather than 'property' to avoid shadowing the builtin
                    properties_list = [prop.get_content() for prop in properties_attrs.get_children()]
            if cifs_attrs.get_child_by_name('symlink-properties'):
                symlink_attrs = cifs_attrs['symlink-properties']
                if symlink_attrs is not None:
                    symlink_list = [symlink.get_content() for symlink in symlink_attrs.get_children()]
            return_value = {
                'share': cifs_attrs.get_child_content('share-name'),
                'path': cifs_attrs.get_child_content('path'),
                'share_properties': properties_list,
                'symlink_properties': symlink_list
            }
            if cifs_attrs.get_child_by_name('vscan-fileop-profile'):
                return_value['vscan_fileop_profile'] = cifs_attrs['vscan-fileop-profile']

        return return_value

    def create_cifs_share(self):
        """
        Create CIFS share
        """
        options = {'share-name': self.parameters.get('share_name'),
                   'path': self.parameters.get('path')}
        cifs_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-create', **options)
        if self.parameters.get('share_properties'):
            property_attrs = netapp_utils.zapi.NaElement('share-properties')
            cifs_create.add_child_elem(property_attrs)
            for aproperty in self.parameters.get('share_properties'):
                property_attrs.add_new_child('cifs-share-properties', aproperty)
        if self.parameters.get('symlink_properties'):
            symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
            cifs_create.add_child_elem(symlink_attrs)
            for symlink in self.parameters.get('symlink_properties'):
                symlink_attrs.add_new_child('cifs-share-symlink-properties', symlink)
        if self.parameters.get('vscan_fileop_profile'):
            fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
            fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
            cifs_create.add_child_elem(fileop_attrs)

        try:
            self.server.invoke_successfully(cifs_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:

            self.module.fail_json(msg='Error creating cifs-share %s: %s'
                                      % (self.parameters.get('share_name'), to_native(error)),
                                  exception=traceback.format_exc())

    def delete_cifs_share(self):
        """
        Delete CIFS share
        """
        cifs_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-delete', **{'share-name': self.parameters.get('share_name')})

        try:
            self.server.invoke_successfully(cifs_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cifs-share %s: %s'
                                      % (self.parameters.get('share_name'), to_native(error)),
                                  exception=traceback.format_exc())

    def modify_cifs_share(self):
        """
        Modify path and/or properties for the given CIFS share
        """
        options = {'share-name': self.parameters.get('share_name')}
        cifs_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-modify', **options)
        if self.parameters.get('path'):
            cifs_modify.add_new_child('path', self.parameters.get('path'))
        if self.parameters.get('share_properties'):
            property_attrs = netapp_utils.zapi.NaElement('share-properties')
            cifs_modify.add_child_elem(property_attrs)
            for aproperty in self.parameters.get('share_properties'):
                property_attrs.add_new_child('cifs-share-properties', aproperty)
        if self.parameters.get('symlink_properties'):
            symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
            cifs_modify.add_child_elem(symlink_attrs)
            for aproperty in self.parameters.get('symlink_properties'):
                symlink_attrs.add_new_child('cifs-share-symlink-properties', aproperty)
        if self.parameters.get('vscan_fileop_profile'):
            fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
            fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
            cifs_modify.add_child_elem(fileop_attrs)
        try:
            self.server.invoke_successfully(cifs_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying cifs-share %s:%s'
                                      % (self.parameters.get('share_name'), to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Apply action to cifs share'''
        netapp_utils.ems_log_event("na_ontap_cifs", self.server)
        current = self.get_cifs_share()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None:
            # ZAPI accepts both 'show-previous-versions' and 'show_previous_versions', but only returns the latter
            if 'show-previous-versions' in self.parameters.get('share_properties', []) and\
                    current and 'show_previous_versions' in current.get('share_properties', []):
                self.parameters['share_properties'].remove('show-previous-versions')
                self.parameters['share_properties'].append('show_previous_versions')
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_share()
            elif cd_action == 'delete':
                self.delete_cifs_share()
            elif modify:
                self.modify_cifs_share()
        results = dict(changed=self.na_helper.changed)
        if modify and netapp_utils.has_feature(self.module, 'show_modified'):
            results['modify'] = str(modify)
        self.module.exit_json(**results)
+
+
def main():
    '''Entry point: run the CIFS share module.'''
    share_module = NetAppONTAPCifsShare()
    share_module.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py
new file mode 100644
index 00000000..90987afe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create or destroy or modify cifs-share-access-controls on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cifs_acl
+options:
+ permission:
+ choices: ['no_access', 'read', 'change', 'full_control']
+ type: str
+ description:
+      - "The access rights that the user or group has on the defined CIFS share."
+ share_name:
+ description:
+ - "The name of the cifs-share-access-control to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified CIFS share acl should exist or not."
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ user_or_group:
+ description:
+ - "The user or group name for which the permissions are listed."
+ required: true
+ type: str
+short_description: NetApp ONTAP manage cifs-share-access-control
+version_added: 2.6.0
+
+'''
+
+EXAMPLES = """
+ - name: Create CIFS share acl
+ na_ontap_cifs_acl:
+ state: present
+ share_name: cifsShareName
+ user_or_group: Everyone
+ permission: read
+ vserver: "{{ netapp_vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify CIFS share acl permission
+ na_ontap_cifs_acl:
+ state: present
+ share_name: cifsShareName
+ user_or_group: Everyone
+ permission: change
+ vserver: "{{ netapp_vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPCifsAcl(object):
    """
    Create, delete or modify an access control entry (user/group +
    permission) on a CIFS share.
    """

    def __init__(self):
        """
        Define the argument spec, read parameters and open a
        vserver-scoped ZAPI connection.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            share_name=dict(required=True, type='str'),
            user_or_group=dict(required=True, type='str'),
            permission=dict(required=False, type='str', choices=['no_access', 'read', 'change', 'full_control'])
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['share_name', 'user_or_group', 'permission'])
            ],
            supports_check_mode=True
        )
        parameters = self.module.params
        # set up state variables
        self.state = parameters['state']
        self.vserver = parameters['vserver']
        self.share_name = parameters['share_name']
        self.user_or_group = parameters['user_or_group']
        self.permission = parameters['permission']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_cifs_acl(self):
        """
        Return details about the cifs-share-access-control
        :param:
            name : Name of the cifs-share-access-control
        :return: Details about the cifs-share-access-control. None if not found.
        :rtype: dict
        """
        cifs_acl_iter = netapp_utils.zapi.NaElement('cifs-share-access-control-get-iter')
        cifs_acl_info = netapp_utils.zapi.NaElement('cifs-share-access-control')
        cifs_acl_info.add_new_child('share', self.share_name)
        cifs_acl_info.add_new_child('user-or-group', self.user_or_group)
        cifs_acl_info.add_new_child('vserver', self.vserver)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_acl_info)
        cifs_acl_iter.add_child_elem(query)
        result = self.server.invoke_successfully(cifs_acl_iter, True)
        return_value = None
        # check if query returns the expected cifs-share-access-control
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:

            cifs_acl = result.get_child_by_name('attributes-list').get_child_by_name('cifs-share-access-control')
            return_value = {
                'share': cifs_acl.get_child_content('share'),
                'user-or-group': cifs_acl.get_child_content('user-or-group'),
                'permission': cifs_acl.get_child_content('permission')
            }

        return return_value

    def create_cifs_acl(self):
        """
        Create access control for the given CIFS share/user-group
        """
        cifs_acl_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-create', **{'share': self.share_name,
                                                   'user-or-group': self.user_or_group,
                                                   'permission': self.permission})
        try:
            self.server.invoke_successfully(cifs_acl_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:

            self.module.fail_json(msg='Error creating cifs-share-access-control %s: %s'
                                  % (self.share_name, to_native(error)),
                                  exception=traceback.format_exc())

    def delete_cifs_acl(self):
        """
        Delete access control for the given CIFS share/user-group
        """
        cifs_acl_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-delete', **{'share': self.share_name,
                                                   'user-or-group': self.user_or_group})
        try:
            self.server.invoke_successfully(cifs_acl_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cifs-share-access-control %s: %s'
                                  % (self.share_name, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_cifs_acl_permission(self):
        """
        Change permission for the given CIFS share/user-group
        """
        cifs_acl_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-modify', **{'share': self.share_name,
                                                   'user-or-group': self.user_or_group,
                                                   'permission': self.permission})
        try:
            self.server.invoke_successfully(cifs_acl_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying cifs-share-access-control permission %s:%s'
                                  % (self.share_name, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """
        Apply action to cifs-share-access-control
        """
        changed = False
        cifs_acl_exists = False
        netapp_utils.ems_log_event("na_ontap_cifs_acl", self.server)
        cifs_acl_details = self.get_cifs_acl()
        if cifs_acl_details:
            cifs_acl_exists = True
            if self.state == 'absent':  # delete
                changed = True
            elif self.state == 'present':
                if cifs_acl_details['permission'] != self.permission:  # modify permission
                    changed = True
        else:
            if self.state == 'present':  # create
                changed = True
        # only touch the system when something must change and we are not
        # in check mode
        if changed and not self.module.check_mode:
            if self.state == 'present':  # execute create or modify
                if not cifs_acl_exists:
                    self.create_cifs_acl()
                else:  # execute modify
                    self.modify_cifs_acl_permission()
            elif self.state == 'absent':  # execute delete
                self.delete_cifs_acl()

        self.module.exit_json(changed=changed)
+
+
def main():
    """
    Entry point: run the CIFS share ACL module.
    """
    acl_module = NetAppONTAPCifsAcl()
    acl_module.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py
new file mode 100644
index 00000000..d1bb6b19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+""" this is cifs_server module
+
+ (c) 2018-2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_cifs_server
+short_description: NetApp ONTAP CIFS server configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+  - Creating, deleting and modifying the CIFS server.
+
+options:
+
+ state:
+ description:
+ - Whether the specified cifs_server should exist or not.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ service_state:
+ description:
+ - CIFS Server Administrative Status.
+ choices: ['stopped', 'started']
+ type: str
+
+ name:
+ description:
+ - Specifies the cifs_server name.
+ required: true
+ aliases: ['cifs_server_name']
+ type: str
+
+ admin_user_name:
+ description:
+ - Specifies the cifs server admin username.
+ - When used with absent, the account will be deleted if admin_password is also provided.
+ type: str
+
+ admin_password:
+ description:
+ - Specifies the cifs server admin password.
+ - When used with absent, the account will be deleted if admin_user_name is also provided.
+ type: str
+
+ domain:
+ description:
+ - The Fully Qualified Domain Name of the Windows Active Directory this CIFS server belongs to.
+ type: str
+
+ workgroup:
+ description:
+ - The NetBIOS name of the domain or workgroup this CIFS server belongs to.
+ type: str
+
+ ou:
+ description:
+ - The Organizational Unit (OU) within the Windows Active Directory
+ this CIFS server belongs to.
+ version_added: 2.7.0
+ type: str
+
+ force:
+ type: bool
+ description:
+ - If this is set and a machine account with the same name as
+ specified in 'name' exists in the Active Directory, it
+ will be overwritten and reused.
+ version_added: 2.7.0
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+ domain: "{{ id_domain }}"
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete cifs_server
+ na_ontap_cifs_server:
+ state: absent
+ name: data2
+ vserver: svm1
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Start cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: started
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Stop cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2_new
+ vserver: svm1
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapcifsServer(object):
    """
    Create, start/stop and delete a CIFS server on an ONTAP vserver.
    """

    def __init__(self):
        """Declare the argument spec, parse parameters and open a vserver-scoped ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            service_state=dict(required=False, choices=['stopped', 'started']),
            name=dict(required=True, type='str', aliases=['cifs_server_name']),
            workgroup=dict(required=False, type='str', default=None),
            domain=dict(required=False, type='str'),
            admin_user_name=dict(required=False, type='str'),
            admin_password=dict(required=False, type='str', no_log=True),
            ou=dict(required=False, type='str'),
            force=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        params = self.module.params

        # set up state variables
        self.state = params['state']
        self.cifs_server_name = params['name']
        self.workgroup = params['workgroup']
        self.domain = params['domain']
        self.vserver = params['vserver']
        self.service_state = params['service_state']
        self.admin_user_name = params['admin_user_name']
        self.admin_password = params['admin_password']
        self.ou = params['ou']
        self.force = params['force']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # all ZAPI calls below are tunneled to this vserver
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_cifs_server(self):
        """
        Return details about the CIFS server configured on the vserver.

        :return: dict with 'cifs_server_name' and 'administrative-status'
                 ('up' or 'down'), or None if no matching CIFS server exists.
        :rtype: dict or None
        """
        cifs_server_info = netapp_utils.zapi.NaElement('cifs-server-get-iter')
        cifs_server_attributes = netapp_utils.zapi.NaElement('cifs-server-config')
        cifs_server_attributes.add_new_child('cifs-server', self.cifs_server_name)
        cifs_server_attributes.add_new_child('vserver', self.vserver)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_server_attributes)
        cifs_server_info.add_child_elem(query)
        result = self.server.invoke_successfully(cifs_server_info, True)
        return_value = None

        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) >= 1:

            cifs_server_attributes = result.get_child_by_name('attributes-list').\
                get_child_by_name('cifs-server-config')
            return_value = {
                'cifs_server_name': self.cifs_server_name,
                'administrative-status': cifs_server_attributes.get_child_content('administrative-status')
            }

        return return_value

    def create_cifs_server(self):
        """
        Create the CIFS server, joining a domain or workgroup when configured.
        """
        # service_state 'started' maps to administrative status 'up', anything else to 'down'
        options = {'cifs-server': self.cifs_server_name, 'administrative-status': 'up'
                   if self.service_state == 'started' else 'down'}
        if self.workgroup is not None:
            options['workgroup'] = self.workgroup
        if self.domain is not None:
            options['domain'] = self.domain
        if self.admin_user_name is not None:
            options['admin-username'] = self.admin_user_name
        if self.admin_password is not None:
            options['admin-password'] = self.admin_password
        if self.ou is not None:
            options['organizational-unit'] = self.ou
        if self.force is not None:
            # ZAPI expects a lowercase 'true'/'false' string, not a Python bool
            options['force-account-overwrite'] = str(self.force).lower()

        cifs_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-server-create', **options)

        try:
            self.server.invoke_successfully(cifs_server_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error Creating cifs_server %s: %s' %
                                  (self.cifs_server_name, to_native(exc)), exception=traceback.format_exc())

    def delete_cifs_server(self):
        """
        Delete the CIFS server, bringing it administratively down first if needed.
        """
        # ONTAP requires the CIFS server to be administratively down before it can
        # be deleted.  (Bug fix: the previous code compared the server *name* to
        # 'up', so the server was never stopped unless it happened to be named 'up'.)
        current = self.get_cifs_server()
        if current is not None and current.get('administrative-status') == 'up':
            self.modify_cifs_server(admin_status='down')

        options = dict()
        if self.admin_user_name is not None:
            options['admin-username'] = self.admin_user_name
        if self.admin_password is not None:
            options['admin-password'] = self.admin_password

        if options:
            cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete', **options)
        else:
            cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete')

        try:
            self.server.invoke_successfully(cifs_server_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error deleting cifs_server %s: %s' % (self.cifs_server_name, to_native(exc)),
                                  exception=traceback.format_exc())

    def modify_cifs_server(self, admin_status):
        """
        Set the administrative status ('up' or 'down') of the CIFS server.
        """
        cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-server-modify', **{'cifs-server': self.cifs_server_name,
                                     'administrative-status': admin_status, 'vserver': self.vserver})
        try:
            self.server.invoke_successfully(cifs_server_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
                                  exception=traceback.format_exc())

    def start_cifs_server(self):
        """
        Start the CIFS server on the vserver.
        """
        cifs_server_start = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-server-start')
        try:
            self.server.invoke_successfully(cifs_server_start,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
                                  exception=traceback.format_exc())

    def stop_cifs_server(self):
        """
        Stop the CIFS server on the vserver.
        """
        cifs_server_stop = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-server-stop')
        try:
            self.server.invoke_successfully(cifs_server_stop,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        """
        Apply the requested state: create, start, stop or delete the CIFS server.
        """
        changed = False
        cifs_server_exists = False
        netapp_utils.ems_log_event("na_ontap_cifs_server", self.server)
        cifs_server_detail = self.get_cifs_server()

        if cifs_server_detail:
            cifs_server_exists = True

            if self.state == 'present':
                # only the administrative status can be reconciled in place
                administrative_status = cifs_server_detail['administrative-status']
                if self.service_state == 'started' and administrative_status == 'down':
                    changed = True
                if self.service_state == 'stopped' and administrative_status == 'up':
                    changed = True
            else:
                # we will delete the CIFs server
                changed = True
        else:
            if self.state == 'present':
                changed = True

        if changed and not self.module.check_mode:
            if self.state == 'present':
                if not cifs_server_exists:
                    self.create_cifs_server()
                elif self.service_state == 'stopped':
                    self.stop_cifs_server()
                elif self.service_state == 'started':
                    self.start_cifs_server()
            elif self.state == 'absent':
                self.delete_cifs_server()

        self.module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: build the CIFS server manager and apply the playbook state."""
    NetAppOntapcifsServer().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py
new file mode 100644
index 00000000..f8f3ce59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py
@@ -0,0 +1,525 @@
+#!/usr/bin/python
+
+# (c) 2017-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_cluster
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_cluster
+short_description: NetApp ONTAP cluster - create a cluster and add/remove nodes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create ONTAP cluster.
+- Add or remove cluster nodes using cluster_ip_address.
+- Adding a node requires ONTAP 9.3 or better.
+- Removing a node requires ONTAP 9.4 or better.
+options:
+ state:
+ description:
+ - Whether the specified cluster should exist (deleting a cluster is not supported).
+ - Whether the node identified by its cluster_ip_address should be in the cluster or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ cluster_name:
+ description:
+ - The name of the cluster to manage.
+ type: str
+ cluster_ip_address:
+ description:
+ - intra cluster IP address of the node to be added or removed.
+ type: str
+ single_node_cluster:
+ description:
+ - Whether the cluster is a single node cluster. Ignored for 9.3 or older versions.
+ - If present, it was observed that 'Cluster' interfaces were deleted, whatever the value.
+ version_added: 19.11.0
+ type: bool
+ cluster_location:
+ description:
+ - Cluster location, only relevant if performing a modify action.
+ version_added: 19.11.0
+ type: str
+ cluster_contact:
+ description:
+ - Cluster contact, only relevant if performing a modify action.
+ version_added: 19.11.0
+ type: str
+ node_name:
+ description:
+ - Name of the node to be added or removed from the cluster.
+ - Be aware that when adding a node, '-' are converted to '_' by the ONTAP backend.
+ - When creating a cluster, C(node_name) is ignored.
+ - When adding a node using C(cluster_ip_address), C(node_name) is optional.
+ - When used to remove a node, C(cluster_ip_address) and C(node_name) are mutually exclusive.
+ version_added: 20.9.0
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create cluster
+ na_ontap_cluster:
+ state: present
+ cluster_name: new_cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add node to cluster (Join cluster)
+ na_ontap_cluster:
+ state: present
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add node to cluster (Join cluster)
+ na_ontap_cluster:
+ state: present
+ cluster_ip_address: 10.10.10.10
+ node_name: my_preferred_node_name
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create a 2 node cluster in one call
+ na_ontap_cluster:
+ state: present
+ cluster_name: new_cluster
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Remove node from cluster
+ na_ontap_cluster:
+ state: absent
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Remove node from cluster
+ na_ontap_cluster:
+ state: absent
+ node_name: node002
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: modify cluster
+ na_ontap_cluster:
+ state: present
+ cluster_contact: testing
+ cluster_location: testing
+ cluster_name: "{{ netapp_cluster}}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPCluster(object):
    """
    Create an ONTAP cluster, add or remove nodes, and modify the cluster identity.
    """
    def __init__(self):
        """Declare the argument spec, validate parameter combinations and connect via ZAPI."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            cluster_name=dict(required=False, type='str'),
            cluster_ip_address=dict(required=False, type='str'),
            cluster_location=dict(required=False, type='str'),
            cluster_contact=dict(required=False, type='str'),
            single_node_cluster=dict(required=False, type='bool'),
            node_name=dict(required=False, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.warnings = list()

        # a node to remove is identified by either its IP or its name, never both
        if self.parameters['state'] == 'absent' and self.parameters.get('node_name') is not None and self.parameters.get('cluster_ip_address') is not None:
            msg = 'when state is "absent", parameters are mutually exclusive: cluster_ip_address|node_name'
            self.module.fail_json(msg=msg)

        # ONTAP replaces '-' with '_' in node names, which can break later matching
        if self.parameters.get('node_name') is not None and '-' in self.parameters.get('node_name'):
            self.warnings.append('ONTAP ZAPI converts "-" to "_", node_name: %s may be changed or not matched' % self.parameters.get('node_name'))

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_cluster_identity(self, ignore_error=True):
        ''' get cluster information, but the cluster may not exist yet
            return:
                None if the cluster cannot be reached
                a dictionary of attributes
        '''
        zapi = netapp_utils.zapi.NaElement('cluster-identity-get')
        try:
            result = self.server.invoke_successfully(zapi, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if ignore_error:
                return None
            self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        cluster_identity = dict()
        if result.get_child_by_name('attributes'):
            identity_info = result.get_child_by_name('attributes').get_child_by_name('cluster-identity-info')
            if identity_info:
                cluster_identity['cluster_contact'] = identity_info.get_child_content('cluster-contact')
                cluster_identity['cluster_location'] = identity_info.get_child_content('cluster-location')
                cluster_identity['cluster_name'] = identity_info.get_child_content('cluster-name')
            return cluster_identity
        return None

    def get_cluster_nodes(self, ignore_error=True):
        ''' get cluster node names, but the cluster may not exist yet
            return:
                None if the cluster cannot be reached
                a list of nodes
        '''
        zapi = netapp_utils.zapi.NaElement('cluster-node-get-iter')
        try:
            result = self.server.invoke_successfully(zapi, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if ignore_error:
                return None
            # bug fix: this message used to say "cluster identity info" (copy/paste)
            self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        cluster_nodes = list()
        if result.get_child_by_name('attributes-list'):
            for node_info in result.get_child_by_name('attributes-list').get_children():
                node_name = node_info.get_child_content('node-name')
                if node_name is not None:
                    cluster_nodes.append(node_name)
            return cluster_nodes
        return None

    def get_cluster_ip_addresses(self, cluster_ip_address, ignore_error=True):
        ''' get list of IP addresses for this cluster
            return:
                a list of dictionaries
        '''
        if_infos = list()
        zapi = netapp_utils.zapi.NaElement('net-interface-get-iter')
        if cluster_ip_address is not None:
            query = netapp_utils.zapi.NaElement('query')
            net_info = netapp_utils.zapi.NaElement('net-interface-info')
            net_info.add_new_child('address', cluster_ip_address)
            query.add_child_elem(net_info)
            zapi.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(zapi, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if ignore_error:
                return if_infos
            self.module.fail_json(msg='Error getting IP addresses: %s' % to_native(error),
                                  exception=traceback.format_exc())

        if result.get_child_by_name('attributes-list'):
            for net_info in result.get_child_by_name('attributes-list').get_children():
                if net_info:
                    if_info = dict()
                    if_info['address'] = net_info.get_child_content('address')
                    if_info['home_node'] = net_info.get_child_content('home-node')
                    if_infos.append(if_info)
        return if_infos

    def get_cluster_ip_address(self, cluster_ip_address, ignore_error=True):
        ''' get node information if it is discoverable
            return:
                None if the cluster cannot be reached
                a dictionary of attributes
        '''
        if cluster_ip_address is None:
            return None
        nodes = self.get_cluster_ip_addresses(cluster_ip_address, ignore_error=ignore_error)
        return nodes if len(nodes) > 0 else None

    def create_cluster(self, older_api=False):
        """
        Create a cluster.

        :param older_api: retry without 'single-node-cluster' for releases
                          that reject that option.
        :return: True if the create was issued, False if the node is already in use.
        """
        # Note: cannot use node_name here:
        # 13001:The "-node-names" parameter must be used with either the "-node-uuids" or the "-cluster-ips" parameters.
        options = {'cluster-name': self.parameters['cluster_name']}
        if not older_api and self.parameters.get('single_node_cluster') is not None:
            options['single-node-cluster'] = str(self.parameters['single_node_cluster']).lower()
        cluster_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cluster-create', **options)
        try:
            self.server.invoke_successfully(cluster_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if error.message == "Extra input: single-node-cluster" and not older_api:
                # fall back for releases that do not know single-node-cluster
                return self.create_cluster(older_api=True)
            # Error 36503 denotes node already being used.
            if to_native(error.code) == "36503":
                return False
            self.module.fail_json(msg='Error creating cluster %s: %s'
                                  % (self.parameters['cluster_name'], to_native(error)),
                                  exception=traceback.format_exc())
        return True

    def add_node(self, older_api=False):
        """
        Add a node to an existing cluster.
        9.2 and 9.3 do not support cluster-ips so fallback to node-ip.

        :return: True if the add was issued, False if there was nothing to do.
        """
        if self.parameters.get('cluster_ip_address') is not None:
            cluster_add_node = netapp_utils.zapi.NaElement('cluster-add-node')
            if older_api:
                cluster_add_node.add_new_child('node-ip', self.parameters.get('cluster_ip_address'))
            else:
                cluster_ips = netapp_utils.zapi.NaElement('cluster-ips')
                cluster_ips.add_new_child('ip-address', self.parameters.get('cluster_ip_address'))
                cluster_add_node.add_child_elem(cluster_ips)
                if self.parameters.get('node_name') is not None:
                    node_names = netapp_utils.zapi.NaElement('node-names')
                    node_names.add_new_child('string', self.parameters.get('node_name'))
                    cluster_add_node.add_child_elem(node_names)
        else:
            return False
        try:
            self.server.invoke_successfully(cluster_add_node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if error.message == "Extra input: cluster-ips" and not older_api:
                return self.add_node(older_api=True)
            # skip if error says no failed operations to retry.
            if to_native(error) == "NetApp API failed. Reason - 13001:There are no failed \"cluster create\" or \"cluster add-node\" operations to retry.":
                return False
            self.module.fail_json(msg='Error adding node with ip %s: %s'
                                  % (self.parameters.get('cluster_ip_address'), to_native(error)),
                                  exception=traceback.format_exc())
        return True

    def remove_node(self):
        """
        Remove a node from an existing cluster, by IP address or by name.
        """
        cluster_remove_node = netapp_utils.zapi.NaElement('cluster-remove-node')
        from_node = ''
        # cluster-ip and node-name are mutually exclusive:
        # 13115:Element "cluster-ip" within "cluster-remove-node" has been excluded by another element.
        if self.parameters.get('cluster_ip_address') is not None:
            cluster_remove_node.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address'))
            from_node = 'IP: %s' % self.parameters.get('cluster_ip_address')
        elif self.parameters.get('node_name') is not None:
            cluster_remove_node.add_new_child('node', self.parameters.get('node_name'))
            from_node = 'name: %s' % self.parameters.get('node_name')

        try:
            self.server.invoke_successfully(cluster_remove_node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if error.message == "Unable to find API: cluster-remove-node":
                msg = 'Error: ZAPI is not available.  Removing a node requires ONTAP 9.4 or newer.'
                self.module.fail_json(msg=msg)
            self.module.fail_json(msg='Error removing node with %s: %s'
                                  % (from_node, to_native(error)), exception=traceback.format_exc())

    def modify_cluster_identity(self, modify):
        """
        Modify the cluster identity (name, location, contact).
        """
        cluster_modify = netapp_utils.zapi.NaElement('cluster-identity-modify')
        if modify.get('cluster_name') is not None:
            cluster_modify.add_new_child("cluster-name", modify.get('cluster_name'))
        if modify.get('cluster_location') is not None:
            cluster_modify.add_new_child("cluster-location", modify.get('cluster_location'))
        if modify.get('cluster_contact') is not None:
            cluster_modify.add_new_child("cluster-contact", modify.get('cluster_contact'))

        try:
            self.server.invoke_successfully(cluster_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # bug fix: message used to read 'idetity'
            self.module.fail_json(msg='Error modifying cluster identity details %s: %s'
                                  % (self.parameters['cluster_name'], to_native(error)),
                                  exception=traceback.format_exc())
        return True

    def cluster_create_wait(self):
        """
        Poll cluster-create-join-progress-get until the create completes or fails.
        """
        cluster_wait = netapp_utils.zapi.NaElement('cluster-create-join-progress-get')
        is_complete = False
        status = ''
        wait = False    # do not wait on the first call

        while not is_complete and status not in ('failed', 'success'):
            if wait:
                time.sleep(10)
            else:
                wait = True
            try:
                result = self.server.invoke_successfully(cluster_wait, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating cluster %s: %s'
                                      % (self.parameters.get('cluster_name'), to_native(error)),
                                      exception=traceback.format_exc())

            clus_progress = result.get_child_by_name('attributes')
            result = clus_progress.get_child_by_name('cluster-create-join-progress-info')
            is_complete = self.na_helper.get_value_for_bool(from_zapi=True,
                                                            value=result.get_child_content('is-complete'))
            status = result.get_child_content('status')

        if not is_complete and status != 'success':
            current_status_message = result.get_child_content('current-status-message')
            self.module.fail_json(
                msg='Failed to create cluster %s: %s' % (self.parameters.get('cluster_name'), current_status_message))

        return is_complete

    def node_add_wait(self):
        """
        Poll cluster-add-node-status-get-iter until the node join completes or fails.
        """
        cluster_node_status = netapp_utils.zapi.NaElement('cluster-add-node-status-get-iter')
        node_status_info = netapp_utils.zapi.NaElement('cluster-create-add-node-status-info')
        node_status_info.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address'))
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(node_status_info)
        cluster_node_status.add_child_elem(query)

        is_complete = None
        failure_msg = None
        wait = False    # do not wait on the first call

        while is_complete != 'success' and is_complete != 'failure':
            if wait:
                time.sleep(10)
            else:
                wait = True
            try:
                result = self.server.invoke_successfully(cluster_node_status, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                if error.message == "Unable to find API: cluster-add-node-status-get-iter":
                    # This API is not supported for 9.3 or earlier releases, just wait a bit
                    time.sleep(60)
                    return
                self.module.fail_json(msg='Error adding node with ip address %s: %s'
                                      % (self.parameters.get('cluster_ip_address'), to_native(error)),
                                      exception=traceback.format_exc())

            attributes_list = result.get_child_by_name('attributes-list')
            join_progress = attributes_list.get_child_by_name('cluster-create-add-node-status-info')
            is_complete = join_progress.get_child_content('status')
            failure_msg = join_progress.get_child_content('failure-msg')

        if is_complete != 'success':
            # bug fix: failure_msg can be None, which used to raise
            # "argument of type 'NoneType' is not iterable"
            if failure_msg and 'Node is already in a cluster' in failure_msg:
                return
            self.module.fail_json(
                msg='Error adding node with ip address %s' % (self.parameters.get('cluster_ip_address')))

    def node_remove_wait(self):
        ''' wait for node name or cluster IP address to disappear '''
        node_name = self.parameters.get('node_name')
        node_ip = self.parameters.get('cluster_ip_address')
        timer = 180     # 180 seconds
        while timer > 0:
            if node_name is not None:
                # bug fix: get_cluster_nodes() returns None when the cluster is
                # unreachable, which used to raise a TypeError on 'in'
                nodes = self.get_cluster_nodes()
                if nodes is not None and node_name not in nodes:
                    return
            if node_ip is not None and self.get_cluster_ip_address(node_ip) is None:
                return
            time.sleep(30)
            timer -= 30
        self.module.fail_json(msg='Timeout waiting for node to be removed from cluster.')

    def autosupport_log(self):
        """
        Autosupport log for cluster
        :return:
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_cluster", cserver)

    def apply(self):
        """
        Apply the requested actions: create cluster, add/remove node, modify identity.
        """
        cluster_action = None
        node_action = None

        cluster_identity = self.get_cluster_identity(ignore_error=True)
        if self.parameters.get('cluster_name') is not None:
            cluster_action = self.na_helper.get_cd_action(cluster_identity, self.parameters)
        if self.parameters.get('cluster_ip_address') is not None:
            existing_interfaces = self.get_cluster_ip_address(self.parameters.get('cluster_ip_address'))
            if self.parameters.get('state') == 'present':
                node_action = 'add_node' if existing_interfaces is None else None
            else:
                node_action = 'remove_node' if existing_interfaces is not None else None
        if self.parameters.get('node_name') is not None and self.parameters['state'] == 'absent':
            nodes = self.get_cluster_nodes()
            if nodes and self.parameters.get('node_name') in nodes:
                node_action = 'remove_node'
        modify = self.na_helper.get_modified_attributes(cluster_identity, self.parameters)

        if node_action is not None:
            self.na_helper.changed = True

        if not self.module.check_mode:
            if cluster_action == 'create':
                if self.create_cluster():
                    self.cluster_create_wait()
            if node_action == 'add_node':
                if self.add_node():
                    self.node_add_wait()
            elif node_action == 'remove_node':
                self.remove_node()
                self.node_remove_wait()
            if modify:
                self.modify_cluster_identity(modify)
        self.autosupport_log()
        self.module.exit_json(changed=self.na_helper.changed, warnings=self.warnings)
+
+
def main():
    """
    Module entry point: build the cluster manager and apply the playbook state.
    """
    NetAppONTAPCluster().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py
new file mode 100644
index 00000000..9a57a2bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Enable or disable HA on a cluster"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cluster_ha
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - "Whether HA on cluster should be enabled or disabled."
+ default: present
+short_description: NetApp ONTAP Manage HA status for cluster
+version_added: 2.6.0
+'''
+
+EXAMPLES = """
+ - name: "Enable HA status for cluster"
+ na_ontap_cluster_ha:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapClusterHA(object):
+ """
+ object initialize and class methods
+ """
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def modify_cluster_ha(self, configure):
+ """
+ Enable or disable HA on cluster
+ :return: None
+ """
+ cluster_ha_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cluster-ha-modify', **{'ha-configured': configure})
+ try:
+ self.server.invoke_successfully(cluster_ha_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying cluster HA to %s: %s'
+ % (configure, to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_cluster_ha_enabled(self):
+ """
+ Get current cluster HA details
+ :return: dict if enabled, None if disabled
+ """
+ cluster_ha_get = netapp_utils.zapi.NaElement('cluster-ha-get')
+ try:
+ result = self.server.invoke_successfully(cluster_ha_get,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError:
+ self.module.fail_json(msg='Error fetching cluster HA details',
+ exception=traceback.format_exc())
+ cluster_ha_info = result.get_child_by_name('attributes').get_child_by_name('cluster-ha-info')
+ if cluster_ha_info.get_child_content('ha-configured') == 'true':
+ return {'ha-configured': True}
+ return None
+
+ def apply(self):
+ """
+ Apply action to cluster HA
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_cluster_ha", cserver)
+ current = self.get_cluster_ha_enabled()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.modify_cluster_ha("true")
+ elif cd_action == 'delete':
+ self.modify_cluster_ha("false")
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create object and call apply
+ """
+ ha_obj = NetAppOntapClusterHA()
+ ha_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py
new file mode 100644
index 00000000..3201770f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete cluster peer relations on ONTAP
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cluster_peer
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - Whether the specified cluster peer should exist or not.
+ default: present
+ source_intercluster_lifs:
+ description:
+ - List of intercluster addresses of the source cluster.
+ - Used as peer-addresses in destination cluster.
+ - All these intercluster lifs should belong to the source cluster.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ aliases:
+ - source_intercluster_lif
+ dest_intercluster_lifs:
+ description:
+ - List of intercluster addresses of the destination cluster.
+ - Used as peer-addresses in source cluster.
+ - All these intercluster lifs should belong to the destination cluster.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ aliases:
+ - dest_intercluster_lif
+ passphrase:
+ description:
+ - The arbitrary passphrase that matches the one given to the peer cluster.
+ type: str
+ source_cluster_name:
+ description:
+ - The name of the source cluster name in the peer relation to be deleted.
+ type: str
+ dest_cluster_name:
+ description:
+ - The name of the destination cluster name in the peer relation to be deleted.
+ - Required for delete
+ type: str
+ dest_hostname:
+ description:
+ - Destination cluster IP or hostname which needs to be peered
+ - Required to complete the peering process at destination cluster.
+ required: True
+ type: str
+ dest_username:
+ description:
+ - Destination username.
+ - Optional if this is same as source username.
+ type: str
+ dest_password:
+ description:
+ - Destination password.
+ - Optional if this is same as source password.
+ type: str
+ ipspace:
+ description:
+ - IPspace of the local intercluster LIFs.
+ - Assumes Default IPspace if not provided.
+ type: str
+ version_added: '20.11.0'
+ encryption_protocol_proposed:
+ description:
+ - Encryption protocol to be used for inter-cluster communication.
+ - Only available on ONTAP 9.5 or later.
+ choices: ['tls_psk', 'none']
+ type: str
+ version_added: '20.5.0'
+short_description: NetApp ONTAP Manage Cluster peering
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: Create cluster peer
+ na_ontap_cluster_peer:
+ state: present
+ source_intercluster_lifs: 1.2.3.4,1.2.3.5
+ dest_intercluster_lifs: 1.2.3.6,1.2.3.7
+ passphrase: XXXX
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ dest_hostname: "{{ dest_netapp_hostname }}"
+ encryption_protocol_proposed: tls_psk
+
+ - name: Delete cluster peer
+ na_ontap_cluster_peer:
+ state: absent
+ source_cluster_name: test-source-cluster
+ dest_cluster_name: test-dest-cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ dest_hostname: "{{ dest_netapp_hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPClusterPeer(object):
+ """
+ Class with cluster peer methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ source_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['source_intercluster_lif']),
+ dest_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['dest_intercluster_lif']),
+ passphrase=dict(required=False, type='str', no_log=True),
+ dest_hostname=dict(required=True, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True),
+ source_cluster_name=dict(required=False, type='str'),
+ dest_cluster_name=dict(required=False, type='str'),
+ ipspace=dict(required=False, type='str'),
+ encryption_protocol_proposed=dict(required=False, type='str', choices=['tls_psk', 'none'])
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=[['source_intercluster_lifs', 'dest_intercluster_lifs']],
+ required_if=[('state', 'absent', ['source_cluster_name', 'dest_cluster_name'])],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # set destination server connection
+ self.module.params['hostname'] = self.parameters['dest_hostname']
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # reset to source host connection for asup logs
+ self.module.params['hostname'] = self.parameters['hostname']
+ self.module.params['username'] = self.parameters['username']
+ self.module.params['password'] = self.parameters['password']
+
+ def cluster_peer_get_iter(self, cluster):
+ """
+ Compose NaElement object to query current source cluster using peer-cluster-name and peer-addresses parameters
+ :param cluster: type of cluster (source or destination)
+ :return: NaElement object for cluster-get-iter with query
+ """
+ cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info')
+ if cluster == 'source':
+ peer_lifs, peer_cluster = 'dest_intercluster_lifs', 'dest_cluster_name'
+ else:
+ peer_lifs, peer_cluster = 'source_intercluster_lifs', 'source_cluster_name'
+ if self.parameters.get(peer_lifs):
+ peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
+ for peer in self.parameters.get(peer_lifs):
+ peer_addresses.add_new_child('remote-inet-address', peer)
+ cluster_peer_info.add_child_elem(peer_addresses)
+ if self.parameters.get(peer_cluster):
+ cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster])
+ query.add_child_elem(cluster_peer_info)
+ cluster_peer_get.add_child_elem(query)
+ return cluster_peer_get
+
+ def cluster_peer_get(self, cluster):
+ """
+ Get current cluster peer info
+ :param cluster: type of cluster (source or destination)
+ :return: Dictionary of current cluster peer details if query successful, else return None
+ """
+ cluster_peer_get_iter = self.cluster_peer_get_iter(cluster)
+ result, cluster_info = None, dict()
+ if cluster == 'source':
+ server = self.server
+ else:
+ server = self.dest_server
+ try:
+ result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching cluster peer %s: %s'
+ % (self.parameters['dest_cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+ # return cluster peer details
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+ cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info')
+ cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name')
+ peers = cluster_peer_info.get_child_by_name('peer-addresses')
+ cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()]
+ return cluster_info
+ return None
+
+ def cluster_peer_delete(self, cluster):
+ """
+ Delete a cluster peer on source or destination
+ For source cluster, peer cluster-name = destination cluster name and vice-versa
+ :param cluster: type of cluster (source or destination)
+ :return:
+ """
+ if cluster == 'source':
+ server, peer_cluster_name = self.server, self.parameters['dest_cluster_name']
+ else:
+ server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name']
+ cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cluster-peer-delete', **{'cluster-name': peer_cluster_name})
+ try:
+ server.invoke_successfully(cluster_peer_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting cluster peer %s: %s'
+ % (peer_cluster_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def cluster_peer_create(self, cluster):
+ """
+ Create a cluster peer on source or destination
+ For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa
+ :param cluster: type of cluster (source or destination)
+ :return: None
+ """
+ cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create')
+ if self.parameters.get('passphrase') is not None:
+ cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase'])
+ peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
+ if cluster == 'source':
+ server, peer_address = self.server, self.parameters['dest_intercluster_lifs']
+ else:
+ server, peer_address = self.dest_server, self.parameters['source_intercluster_lifs']
+ for each in peer_address:
+ peer_addresses.add_new_child('remote-inet-address', each)
+ cluster_peer_create.add_child_elem(peer_addresses)
+ if self.parameters.get('encryption_protocol_proposed') is not None:
+ cluster_peer_create.add_new_child('encryption-protocol-proposed', self.parameters['encryption_protocol_proposed'])
+ if self.parameters.get('ipspace') is not None:
+ cluster_peer_create.add_new_child('ipspace-name', self.parameters['ipspace'])
+
+ try:
+ server.invoke_successfully(cluster_peer_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating cluster peer %s: %s'
+ % (peer_address, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to cluster peer
+ :return: None
+ """
+ self.asup_log_for_cserver("na_ontap_cluster_peer")
+ source = self.cluster_peer_get('source')
+ destination = self.cluster_peer_get('destination')
+ source_action = self.na_helper.get_cd_action(source, self.parameters)
+ destination_action = self.na_helper.get_cd_action(destination, self.parameters)
+ self.na_helper.changed = False
+ # create only if expected cluster peer relation is not present on both source and destination clusters
+ if source_action == 'create' and destination_action == 'create':
+ if not self.module.check_mode:
+ self.cluster_peer_create('source')
+ self.cluster_peer_create('destination')
+ self.na_helper.changed = True
+ # delete peer relation in cluster where relation is present
+ else:
+ if source_action == 'delete':
+ if not self.module.check_mode:
+ self.cluster_peer_delete('source')
+ self.na_helper.changed = True
+ if destination_action == 'delete':
+ if not self.module.check_mode:
+ self.cluster_peer_delete('destination')
+ self.na_helper.changed = True
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ """
+ Execute action
+ :return: None
+ """
+ community_obj = NetAppONTAPClusterPeer()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
new file mode 100644
index 00000000..bb8bc364
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+'''
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Run system-cli commands on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_command
+short_description: NetApp ONTAP Run any cli command, the username provided needs to have console login permission.
+version_added: 2.7.0
+options:
+ command:
+ description:
+ - a comma separated list containing the command and arguments.
+ required: true
+ type: list
+ elements: str
+ privilege:
+ description:
+ - privilege level at which to run the command.
+ choices: ['admin', 'advanced']
+ default: admin
+ type: str
+ version_added: 2.8.0
+ return_dict:
+ description:
+ - Returns a parseable dictionary instead of raw XML output
+ - C(result_value)
+ - C(status) > passed, failed..
+ - C(stdout) > command output in plaintext
+ - C(stdout_lines) > list of command output lines
+ - C(stdout_lines_filter) > empty list or list of command output lines matching I(include_lines) or I(exclude_lines) parameters.
+ type: bool
+ default: false
+ version_added: 2.9.0
+ vserver:
+ description:
+ - If running as vserver admin, you must give a I(vserver) or module will fail
+ version_added: "19.10.0"
+ type: str
+ include_lines:
+ description:
+ - applied only when I(return_dict) is true
+ - return only lines containing string pattern in C(stdout_lines_filter)
+ default: ''
+ type: str
+ version_added: "19.10.0"
+ exclude_lines:
+ description:
+ - applied only when I(return_dict) is true
+ - return only lines not containing string pattern in C(stdout_lines_filter)
+ default: ''
+ type: str
+ version_added: "19.10.0"
+'''
+
+EXAMPLES = """
+ - name: run ontap cli command
+ na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['version']
+
+ # Same as above, but returns a parseable dictionary
+ - name: run ontap cli command
+ na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['node', 'show', '-fields', 'node,health,uptime,model']
+ privilege: 'admin'
+ return_dict: true
+
+ # Same as above, but with lines filtering
+ - name: run ontap cli command
+ na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['node', 'show', '-fields', 'node,health,uptime,model']
+ exclude_lines: 'ode ' # Exclude lines with 'Node ' or 'node'
+ privilege: 'admin'
+ return_dict: true
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCommand(object):
+ ''' calls a CLI command '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ command=dict(required=True, type='list', elements='str'),
+ privilege=dict(required=False, type='str', choices=['admin', 'advanced'], default='admin'),
+ return_dict=dict(required=False, type='bool', default=False),
+ vserver=dict(required=False, type='str'),
+ include_lines=dict(required=False, type='str', default=''),
+ exclude_lines=dict(required=False, type='str', default=''),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.command = parameters['command']
+ self.privilege = parameters['privilege']
+ self.vserver = parameters['vserver']
+ self.return_dict = parameters['return_dict']
+ self.include_lines = parameters['include_lines']
+ self.exclude_lines = parameters['exclude_lines']
+
+ self.result_dict = dict()
+ self.result_dict['status'] = ""
+ self.result_dict['result_value'] = 0
+ self.result_dict['invoked_command'] = " ".join(self.command)
+ self.result_dict['stdout'] = ""
+ self.result_dict['stdout_lines'] = []
+ self.result_dict['stdout_lines_filter'] = []
+ self.result_dict['xml_dict'] = dict()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ try:
+ netapp_utils.ems_log_event(event_name, cserver)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Cluster Admin required if -vserver is not passed %s: %s' %
+ (self.command, to_native(error)),
+ exception=traceback.format_exc())
+
+ def run_command(self):
+ ''' calls the ZAPI '''
+ self.ems()
+ command_obj = netapp_utils.zapi.NaElement("system-cli")
+
+ args_obj = netapp_utils.zapi.NaElement("args")
+ if self.return_dict:
+ args_obj.add_new_child('arg', 'set')
+ args_obj.add_new_child('arg', '-showseparator')
+ args_obj.add_new_child('arg', '"###"')
+ args_obj.add_new_child('arg', ';')
+ for arg in self.command:
+ args_obj.add_new_child('arg', arg)
+ command_obj.add_child_elem(args_obj)
+ command_obj.add_new_child('priv', self.privilege)
+
+ try:
+ output = self.server.invoke_successfully(command_obj, True)
+ if self.return_dict:
+ # Parseable dict output
+ retval = self.parse_xml_to_dict(output.to_string())
+ else:
+ # Raw XML output
+ retval = output.to_string()
+
+ return retval
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error running command %s: %s' %
+ (self.command, to_native(error)),
+ exception=traceback.format_exc())
+
+ def ems(self):
+ """
+ Error out if Cluster Admin username is used with Vserver, or Vserver admin used without vserver being set
+ :return:
+ """
+ if self.vserver:
+ ems_server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+ try:
+ netapp_utils.ems_log_event("na_ontap_command" + str(self.command), ems_server)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Vserver admin required if -vserver is given %s: %s' %
+ (self.command, to_native(error)),
+ exception=traceback.format_exc())
+ else:
+ self.asup_log_for_cserver("na_ontap_command: " + str(self.command))
+
+ def apply(self):
+ ''' calls the command and returns raw output '''
+ changed = True
+ output = self.run_command()
+ self.module.exit_json(changed=changed, msg=output)
+
+ def parse_xml_to_dict(self, xmldata):
+ '''Parse raw XML from system-cli and create an Ansible parseable dictionary'''
+ xml_import_ok = True
+ xml_parse_ok = True
+
+ try:
+ importing = 'ast'
+ import ast
+ importing = 'xml.parsers.expat'
+ import xml.parsers.expat
+ except ImportError:
+ self.result_dict['status'] = "XML parsing failed. Cannot import %s!" % importing
+ self.result_dict['stdout'] = str(xmldata)
+ self.result_dict['result_value'] = -1
+ xml_import_ok = False
+
+ if xml_import_ok:
+ xml_str = xmldata.decode('utf-8').replace('\n', '---')
+ xml_parser = xml.parsers.expat.ParserCreate()
+ xml_parser.StartElementHandler = self._start_element
+ xml_parser.CharacterDataHandler = self._char_data
+ xml_parser.EndElementHandler = self._end_element
+
+ try:
+ xml_parser.Parse(xml_str)
+ except xml.parsers.expat.ExpatError as errcode:
+ self.result_dict['status'] = "XML parsing failed: " + str(errcode)
+ self.result_dict['stdout'] = str(xmldata)
+ self.result_dict['result_value'] = -1
+ xml_parse_ok = False
+
+ if xml_parse_ok:
+ self.result_dict['status'] = self.result_dict['xml_dict']['results']['attrs']['status']
+ stdout_string = self._format_escaped_data(self.result_dict['xml_dict']['cli-output']['data'])
+ self.result_dict['stdout'] = stdout_string
+ # Generate stdout_lines list
+ for line in stdout_string.split('\n'):
+ stripped_line = line.strip()
+ if len(stripped_line) > 1:
+ self.result_dict['stdout_lines'].append(stripped_line)
+
+ # Generate stdout_lines_filter_list
+ if self.exclude_lines:
+ if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
+ self.result_dict['stdout_lines_filter'].append(stripped_line)
+ else:
+ if self.include_lines and self.include_lines in stripped_line:
+ self.result_dict['stdout_lines_filter'].append(stripped_line)
+
+ self.result_dict['xml_dict']['cli-output']['data'] = stdout_string
+ cli_result_value = self.result_dict['xml_dict']['cli-result-value']['data']
+ try:
+ # get rid of extra quotes "'1'", but maybe "u'1'" or "b'1'"
+ cli_result_value = ast.literal_eval(cli_result_value)
+ except (SyntaxError, ValueError):
+ pass
+ try:
+ self.result_dict['result_value'] = int(cli_result_value)
+ except ValueError:
+ self.result_dict['result_value'] = cli_result_value
+
+ return self.result_dict
+
+ def _start_element(self, name, attrs):
+ ''' Start XML element '''
+ self.result_dict['xml_dict'][name] = dict()
+ self.result_dict['xml_dict'][name]['attrs'] = attrs
+ self.result_dict['xml_dict'][name]['data'] = ""
+ self.result_dict['xml_dict']['active_element'] = name
+ self.result_dict['xml_dict']['last_element'] = ""
+
+ def _char_data(self, data):
+ ''' Dump XML element data '''
+ self.result_dict['xml_dict'][str(self.result_dict['xml_dict']['active_element'])]['data'] = repr(data)
+
+ def _end_element(self, name):
+ self.result_dict['xml_dict']['last_element'] = name
+ self.result_dict['xml_dict']['active_element'] = ""
+
+ @classmethod
+ def _format_escaped_data(cls, datastring):
+ ''' replace helper escape sequences '''
+ formatted_string = datastring.replace('------', '---').replace('---', '\n').replace("###", " ").strip()
+ retval_string = ""
+ for line in formatted_string.split('\n'):
+ stripped_line = line.strip()
+ if len(stripped_line) > 1:
+ retval_string += stripped_line + "\n"
+ return retval_string
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPCommand()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py
new file mode 100644
index 00000000..9833cc7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_disks
+
+short_description: NetApp ONTAP Assign disks to nodes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Assign all or part of disks to nodes.
+
+options:
+
+ node:
+ required: true
+ type: str
+ description:
+ - It specifies the node to assign all visible unowned disks.
+
+ disk_count:
+ description:
+ - Total number of disks a node should own
+ type: int
+ version_added: 2.9.0
+
+ disk_type:
+ description:
+ - Assign specified type of disk (or set of disks). The disk_count parameter is mandatory.
+ type: str
+ choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']
+ version_added: '20.6.0'
+
+'''
+
+EXAMPLES = """
+ - name: Assign unowned disks
+ na_ontap_disks:
+ node: cluster-01
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+
+ - name: Assign specified total disks
+ na_ontap_disks:
+
+ disk_count: 56
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+
+ - name: Assign disk with disk type
+ na_ontap_disks:
+ node: cluster-01
+ disk_count: 56
+ disk_type: VMDISK
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapDisks(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ node=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ disk_type=dict(required=False, type='str', choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_unassigned_disk_count(self, disk_type=None):
+ """
+ Check for free disks
+ """
+ disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
+ disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
+ disk_raid_info.add_new_child('container-type', 'unassigned')
+ disk_storage_info.add_child_elem(disk_raid_info)
+
+ disk_query = netapp_utils.zapi.NaElement('query')
+ disk_query.add_child_elem(disk_storage_info)
+
+ if disk_type is not None:
+ disk_inventory_info = netapp_utils.zapi.NaElement('storage-inventory-info')
+ disk_inventory_info.add_new_child('disk-type', disk_type)
+
+ disk_iter.add_child_elem(disk_query)
+
+ try:
+ result = self.server.invoke_successfully(disk_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting disk information: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+ return int(result.get_child_content('num-records'))
+
+ def get_owned_disk_count(self, disk_type=None):
+ """
+ Check for owned disks
+ """
+ disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
+ disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
+ disk_ownership_info.add_new_child('home-node-name', self.parameters['node'])
+ disk_storage_info.add_child_elem(disk_ownership_info)
+
+ disk_query = netapp_utils.zapi.NaElement('query')
+ disk_query.add_child_elem(disk_storage_info)
+
+ if disk_type is not None:
+ disk_inventory_info = netapp_utils.zapi.NaElement('storage-inventory-info')
+ disk_inventory_info.add_new_child('disk-type', disk_type)
+
+ disk_iter.add_child_elem(disk_query)
+
+ try:
+ result = self.server.invoke_successfully(disk_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting disk information: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+ return int(result.get_child_content('num-records'))
+
+ def disk_assign(self, needed_disks, disk_type=None):
+ """
+ Set node as disk owner.
+ """
+ if needed_disks > 0:
+ assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'disk-sanown-assign', **{'owner': self.parameters['node'],
+ 'disk-count': str(needed_disks)})
+ else:
+ assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'disk-sanown-assign', **{'node-name': self.parameters['node'],
+ 'all': 'true'})
+ if disk_type is not None:
+ assign_disk.add_new_child('disk-type', disk_type)
+ try:
+ self.server.invoke_successfully(assign_disk,
+ enable_tunneling=True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "13001":
+ # NOTE(review): code checks error 13001 but this comment cited 13060 ("aggregate is already online") — verify intended ZAPI error code
+ return False
+ else:
+ self.module.fail_json(msg='Error assigning disks %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Apply action to disks'''
+ changed = False
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_disks", cserver)
+
+ # check if anything needs to be changed (add/delete/update)
+ unowned_disks = self.get_unassigned_disk_count(disk_type=self.parameters.get('disk_type'))
+ owned_disks = self.get_owned_disk_count(disk_type=self.parameters.get('disk_type'))
+ if 'disk_count' in self.parameters:
+ if self.parameters['disk_count'] < owned_disks:
+ self.module.fail_json(msg="Fewer disks than are currently owned was requested. "
+ "This module does not do any disk removing. "
+ "All disk removing will need to be done manually.")
+ if self.parameters['disk_count'] > owned_disks + unowned_disks:
+ self.module.fail_json(msg="Not enough unowned disks remain to fulfill request")
+ if unowned_disks >= 1:
+ if 'disk_count' in self.parameters:
+ if self.parameters['disk_count'] > owned_disks:
+ needed_disks = self.parameters['disk_count'] - owned_disks
+ if not self.module.check_mode:
+ self.disk_assign(needed_disks, disk_type=self.parameters.get('disk_type'))
+ changed = True
+ else:
+ if not self.module.check_mode:
+ self.disk_assign(0)
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ ''' Create object and call apply '''
+ obj_aggr = NetAppOntapDisks()
+ obj_aggr.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py
new file mode 100644
index 00000000..b3755a2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_dns
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_dns
+short_description: NetApp ONTAP Create, delete, modify DNS servers.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, modify DNS servers.
+- With REST, the module is currently limited to data vservers for delete or modify operations.
+options:
+ state:
+ description:
+ - Whether the DNS servers should be enabled for the given vserver.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ domains:
+ description:
+ - List of DNS domains such as 'sales.bar.com'. The first domain is the one that the Vserver belongs to.
+ type: list
+ elements: str
+
+ nameservers:
+ description:
+ - List of IPv4 addresses of name servers such as '123.123.123.123'.
+ type: list
+ elements: str
+
+ skip_validation:
+ type: bool
+ description:
+ - By default, all nameservers are checked to validate they are available to resolve.
+      - If your DNS servers are not yet installed or momentarily not available, you can set this option to 'true'
+ - to bypass the check for all servers specified in nameservers field.
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+ - name: create DNS
+ na_ontap_dns:
+ state: present
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ vserver: "{{vservername}}"
+ domains: sales.bar.com
+ nameservers: 10.193.0.250,10.192.0.250
+ skip_validation: true
+"""
+
+RETURN = """
+
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapDns(object):
+ """
+ Enable and Disable dns
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ domains=dict(required=False, type='list', elements='str'),
+ nameservers=dict(required=False, type='list', elements='str'),
+ skip_validation=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[('state', 'present', ['domains', 'nameservers'])],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Cluster vserver and data vserver use different REST API.
+ self.is_cluster = False
+
+ # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ # some attributes are not supported in earlier REST implementation
+ unsupported_rest_properties = ['skip_validation']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+
+ if error is not None:
+ self.module.fail_json(msg=error)
+
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def create_dns(self):
+ """
+ Create DNS server
+ :return: none
+ """
+ if self.use_rest:
+ if self.is_cluster:
+ api = 'cluster'
+ params = {
+ 'dns_domains': self.parameters['domains'],
+ 'name_servers': self.parameters['nameservers']
+ }
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ api = 'name-services/dns'
+ params = {
+ 'domains': self.parameters['domains'],
+ 'servers': self.parameters['nameservers'],
+ 'svm': {
+ 'name': self.parameters['vserver']
+ }
+ }
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ dns = netapp_utils.zapi.NaElement('net-dns-create')
+ nameservers = netapp_utils.zapi.NaElement('name-servers')
+ domains = netapp_utils.zapi.NaElement('domains')
+ for each in self.parameters['nameservers']:
+ ip_address = netapp_utils.zapi.NaElement('ip-address')
+ ip_address.set_content(each)
+ nameservers.add_child_elem(ip_address)
+ dns.add_child_elem(nameservers)
+ for each in self.parameters['domains']:
+ domain = netapp_utils.zapi.NaElement('string')
+ domain.set_content(each)
+ domains.add_child_elem(domain)
+ dns.add_child_elem(domains)
+ if self.parameters.get('skip_validation'):
+ validation = netapp_utils.zapi.NaElement('skip-config-validation')
+ validation.set_content(str(self.parameters['skip_validation']))
+ dns.add_child_elem(validation)
+ try:
+ self.server.invoke_successfully(dns, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating dns: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def destroy_dns(self, dns_attrs):
+ """
+ Destroys an already created dns
+ :return:
+ """
+ if self.use_rest:
+ if self.is_cluster:
+ error = 'cluster operation for deleting DNS is not supported with REST.'
+ self.module.fail_json(msg=error)
+ api = 'name-services/dns/' + dns_attrs['uuid']
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('net-dns-destroy'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error destroying dns %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_cluster(self):
+ api = "cluster"
+ message, error = self.rest_api.get(api, None)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ self.module.fail_json(msg="no data from cluster %s" % str(message))
+ return message
+
+ def get_cluster_dns(self):
+ cluster_attrs = self.get_cluster()
+ dns_attrs = None
+ if self.parameters['vserver'] == cluster_attrs['name']:
+ dns_attrs = {
+ 'domains': cluster_attrs.get('dns_domains'),
+ 'nameservers': cluster_attrs.get('name_servers'),
+ 'uuid': cluster_attrs['uuid'],
+ }
+ self.is_cluster = True
+ if dns_attrs['domains'] is None and dns_attrs['nameservers'] is None:
+ dns_attrs = None
+ return dns_attrs
+
+ def get_dns(self):
+ if self.use_rest:
+ api = "name-services/dns"
+ params = {'fields': 'domains,servers,svm',
+ "svm.name": self.parameters['vserver']}
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ message = None
+ elif 'records' in message and len(message['records']) == 0:
+ message = None
+ elif 'records' not in message or len(message['records']) != 1:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ if message is not None:
+ record = message['records'][0]
+ attrs = {
+ 'domains': record['domains'],
+ 'nameservers': record['servers'],
+ 'uuid': record['svm']['uuid']
+ }
+ return attrs
+ return None
+ else:
+ dns_obj = netapp_utils.zapi.NaElement('net-dns-get')
+ try:
+ result = self.server.invoke_successfully(dns_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "15661":
+ # 15661 is object not found
+ return None
+ else:
+ self.module.fail_json(msg=to_native(
+ error), exception=traceback.format_exc())
+
+ # read data for modify
+ attrs = dict()
+ attributes = result.get_child_by_name('attributes')
+ dns_info = attributes.get_child_by_name('net-dns-info')
+ nameservers = dns_info.get_child_by_name('name-servers')
+ attrs['nameservers'] = [each.get_content() for each in nameservers.get_children()]
+ domains = dns_info.get_child_by_name('domains')
+ attrs['domains'] = [each.get_content() for each in domains.get_children()]
+ attrs['skip_validation'] = dns_info.get_child_by_name('skip-config-validation')
+ return attrs
+
+ def modify_dns(self, dns_attrs):
+ if self.use_rest:
+ changed = False
+ params = {}
+ if dns_attrs['nameservers'] != self.parameters['nameservers']:
+ changed = True
+ params['servers'] = self.parameters['nameservers']
+ if dns_attrs['domains'] != self.parameters['domains']:
+ changed = True
+ params['domains'] = self.parameters['domains']
+ if changed and not self.module.check_mode:
+ uuid = dns_attrs['uuid']
+ api = "name-services/dns/" + uuid
+ if self.is_cluster:
+ api = 'cluster'
+ params = {
+ 'dns_domains': self.parameters['domains'],
+ 'name_servers': self.parameters['nameservers']
+ }
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ else:
+ changed = False
+ dns = netapp_utils.zapi.NaElement('net-dns-modify')
+ if dns_attrs['nameservers'] != self.parameters['nameservers']:
+ changed = True
+ nameservers = netapp_utils.zapi.NaElement('name-servers')
+ for each in self.parameters['nameservers']:
+ ip_address = netapp_utils.zapi.NaElement('ip-address')
+ ip_address.set_content(each)
+ nameservers.add_child_elem(ip_address)
+ dns.add_child_elem(nameservers)
+ if dns_attrs['domains'] != self.parameters['domains']:
+ changed = True
+ domains = netapp_utils.zapi.NaElement('domains')
+ for each in self.parameters['domains']:
+ domain = netapp_utils.zapi.NaElement('string')
+ domain.set_content(each)
+ domains.add_child_elem(domain)
+ dns.add_child_elem(domains)
+ if changed and not self.module.check_mode:
+ if self.parameters.get('skip_validation'):
+ validation = netapp_utils.zapi.NaElement('skip-config-validation')
+ validation.set_content(str(self.parameters['skip_validation']))
+ dns.add_child_elem(validation)
+ try:
+ self.server.invoke_successfully(dns, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying dns %s' %
+ (to_native(error)), exception=traceback.format_exc())
+ return changed
+
+ def apply(self):
+ # asup logging
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_dns", self.server)
+ dns_attrs = self.get_dns()
+ if self.use_rest and dns_attrs is None:
+ # There is a chance we are working at the cluster level
+ dns_attrs = self.get_cluster_dns()
+ changed = False
+ if self.parameters['state'] == 'present':
+ if dns_attrs is not None:
+ changed = self.modify_dns(dns_attrs)
+ else:
+ if not self.module.check_mode:
+ self.create_dns()
+ changed = True
+ else:
+ if dns_attrs is not None:
+ if not self.module.check_mode:
+ self.destroy_dns(dns_attrs)
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Create, Delete, Modify DNS servers.
+ """
+ obj = NetAppOntapDns()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py
new file mode 100644
index 00000000..def8c76e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_efficiency_policy
+short_description: NetApp ONTAP manage efficiency policies (sis policies)
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete efficiency policies (sis policies)
+options:
+ state:
+ description:
+ - Whether the specified efficiency policy should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ policy_name:
+ description:
+ - the name of the efficiency policy
+ required: true
+ type: str
+
+ comment:
+ description:
+ - A brief description of the policy.
+ type: str
+
+ duration:
+ description:
+ - The duration in hours for which the scheduled efficiency operation should run.
+ After this time expires, the efficiency operation will be stopped even if the operation is incomplete.
+ If '-' is specified as the duration, the efficiency operation will run till it completes. Otherwise, the duration has to be an integer greater than 0.
+ By default, the operation runs till it completes.
+ type: str
+
+ enabled:
+ description:
+ - If the value is true, the efficiency policy is active in this cluster.
+ If the value is false this policy will not be activated by the schedulers and hence will be inactive.
+ type: bool
+
+ policy_type:
+ description:
+ - The policy type reflects the reason a volume using this policy will start processing a changelog.
+ - (Changelog processing is identifying and eliminating duplicate blocks which were written since the changelog was last processed.)
+ - threshold Changelog processing occurs once the changelog reaches a certain percent full.
+ - scheduled Changelog processing will be triggered by time.
+ choices: ['threshold', 'scheduled']
+ type: str
+
+ qos_policy:
+ description:
+ - QoS policy for the efficiency operation.
+ - background efficiency operation will run in background with minimal or no impact on data serving client operations,
+ - best-effort efficiency operations may have some impact on data serving client operations.
+ choices: ['background', 'best_effort']
+ type: str
+
+ schedule:
+ description:
+ - Cron type job schedule name. When the associated policy is set on a volume, the efficiency operation will be triggered for the volume on this schedule.
+ - These schedules can be created using the na_ontap_job_schedule module
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ changelog_threshold_percent:
+ description:
+ - Specifies the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+ type: int
+ version_added: '19.11.0'
+'''
+
+EXAMPLES = """
+ - name: Create threshold efficiency policy
+ na_ontap_efficiency_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ vserver: ansible
+ state: present
+ policy_name: test
+ comment: This policy is for x and y
+ enabled: true
+ policy_type: threshold
+ qos_policy: background
+ changelog_threshold_percent: 20
+
+ - name: Create efficiency Scheduled efficiency Policy
+ na_ontap_efficiency_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ vserver: ansible
+ state: present
+ policy_name: test2
+ comment: This policy is for x and y
+ enabled: true
+ schedule: new_job_schedule
+ duration: 1
+ policy_type: scheduled
+ qos_policy: background
+"""
+
+RETURN = """
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapEfficiencyPolicy(object):
+ """
+ Create, delete and modify efficiency policy
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ policy_name=dict(required=True, type='str'),
+ comment=dict(required=False, type='str'),
+ duration=dict(required=False, type='str'),
+ enabled=dict(required=False, type='bool'),
+ policy_type=dict(required=False, choices=['threshold', 'scheduled']),
+ qos_policy=dict(required=False, choices=['background', 'best_effort']),
+ schedule=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ changelog_threshold_percent=dict(required=False, type='int')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[('changelog_threshold_percent', 'duration'), ('changelog_threshold_percent', 'schedule')]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+ if self.parameters.get('policy_type'):
+ if self.parameters['policy_type'] == 'threshold':
+ if self.parameters.get('duration'):
+ self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
+ if self.parameters.get('schedule'):
+ self.module.fail_json(msg='schedule cannot be set if policy_type is threshold')
+ # if policy_type is 'scheduled'
+ else:
+ if self.parameters.get('changelog_threshold_percent'):
+ self.module.fail_json(msg='changelog_threshold_percent cannot be set if policy_type is scheduled')
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+
+ self.na_helper.zapi_int_keys = {
+ 'changelog_threshold_percent': 'changelog-threshold-percent'
+ }
+ self.na_helper.zapi_str_keys = {
+ 'policy_name': 'policy-name',
+ 'comment': 'comment',
+ 'policy_type': 'policy-type',
+ 'qos_policy': 'qos-policy',
+ 'schedule': 'schedule',
+ 'duration': 'duration'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'enabled': 'enabled'
+ }
+
+ def get_efficiency_policy(self):
+ """
+        Get an efficiency policy
+        :return: an efficiency-policy info
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-get-iter")
+ query = netapp_utils.zapi.NaElement("query")
+ sis_policy_info = netapp_utils.zapi.NaElement("sis-policy-info")
+ sis_policy_info.add_new_child("policy-name", self.parameters['policy_name'])
+ sis_policy_info.add_new_child("vserver", self.parameters['vserver'])
+ query.add_child_elem(sis_policy_info)
+ sis_policy_obj.add_child_elem(query)
+ try:
+ results = self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error searching for efficiency policy %s: %s" % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return_value = {}
+ if results.get_child_by_name('num-records') and int(results.get_child_content('num-records')) == 1:
+ attributes_list = results.get_child_by_name('attributes-list')
+ sis_info = attributes_list.get_child_by_name('sis-policy-info')
+ for option, zapi_key in self.na_helper.zapi_int_keys.items():
+ return_value[option] = self.na_helper.get_value_for_int(from_zapi=True, value=sis_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+ return_value[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=sis_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_str_keys.items():
+ return_value[option] = sis_info.get_child_content(zapi_key)
+ return return_value
+ return None
+
+ def create_efficiency_policy(self):
+ """
+        Creates an efficiency policy
+        :return: None
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-create")
+ for option, zapi_key in self.na_helper.zapi_int_keys.items():
+ if self.parameters.get(option):
+ sis_policy_obj.add_new_child(zapi_key,
+ self.na_helper.get_value_for_int(from_zapi=False,
+ value=self.parameters[option]))
+ for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+ if self.parameters.get(option):
+ sis_policy_obj.add_new_child(zapi_key,
+ self.na_helper.get_value_for_bool(from_zapi=False,
+ value=self.parameters[option]))
+ for option, zapi_key in self.na_helper.zapi_str_keys.items():
+ if self.parameters.get(option):
+ sis_policy_obj.add_new_child(zapi_key, str(self.parameters[option]))
+ try:
+ self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_efficiency_policy(self):
+ """
+        Delete an efficiency policy
+ :return: None
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-delete")
+ sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error deleting efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_efficiency_policy(self, current, modify):
+ """
+        Modify an efficiency policy
+ :return: None
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-modify")
+ sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+ # sis-policy-create zapi pre-checks the options and fails if it's not supported.
+ # sis-policy-modify pre-checks one of the options, but tries to modify the others even it's not supported. And it will mess up the vsim.
+ # Do the checks before sending to the zapi.
+ if current['policy_type'] == 'scheduled' and self.parameters.get('policy_type') != 'threshold':
+ if modify.get('changelog_threshold_percent'):
+ self.module.fail_json(msg="changelog_threshold_percent cannot be set if policy_type is scheduled")
+ elif current['policy_type'] == 'threshold' and self.parameters.get('policy_type') != 'scheduled':
+ if modify.get('duration'):
+ self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
+ elif modify.get('schedule'):
+ self.module.fail_json(msg="schedule cannot be set if policy_type is threshold")
+ for attribute in modify:
+ sis_policy_obj.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
+ try:
+ self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modifying efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
+ return str.replace(attribute, '_', '-')
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_efficiency_policy", self.server)
+ current = self.get_efficiency_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_efficiency_policy()
+ elif cd_action == 'delete':
+ self.delete_efficiency_policy()
+ elif modify:
+ self.modify_efficiency_policy(current, modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapEfficiencyPolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py
new file mode 100644
index 00000000..39c01e08
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_export_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_export_policy
+short_description: NetApp ONTAP manage export-policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create or destroy or rename export-policies on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified export policy should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the export-policy to manage.
+ type: str
+ required: true
+ from_name:
+ description:
+ - The name of the export-policy to be renamed.
+ type: str
+ version_added: 2.7.0
+ vserver:
+ required: true
+ type: str
+ description:
+ - Name of the vserver to use.
+'''
+
+EXAMPLES = """
+ - name: Create Export Policy
+ na_ontap_export_policy:
+ state: present
+ name: ansiblePolicyName
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Rename Export Policy
+ na_ontap_export_policy:
+        state: present
+ from_name: ansiblePolicyName
+ vserver: vs_hack
+ name: newPolicyName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete Export Policy
+ na_ontap_export_policy:
+ state: absent
+ name: ansiblePolicyName
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPExportPolicy(object):
+ """
+ Class with export policy methods
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str', default=None),
+ vserver=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_export_policy(self, name=None, uuid=None):
+ """
+ Return details about the export-policy
+ :param:
+ name : Name of the export-policy
+ :return: Details about the export-policy. None if not found.
+ :rtype: dict
+ """
+ if name is None:
+ name = self.parameters['name']
+ if self.use_rest:
+ params = {'fields': 'name',
+ 'name': name,
+ 'svm.uuid': uuid}
+ api = 'protocols/nfs/export-policies/'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching export policy: %s" % error)
+ if message['num_records'] > 0:
+ return {'policy-name': message['records'][0]['name']}
+ else:
+ return None
+
+ else:
+ export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter')
+ export_policy_info = netapp_utils.zapi.NaElement('export-policy-info')
+ export_policy_info.add_new_child('policy-name', name)
+ export_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(export_policy_info)
+ export_policy_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(export_policy_iter, True)
+ return_value = None
+ # check if query returns the expected export-policy
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+
+ export_policy = result.get_child_by_name('attributes-list').get_child_by_name('export-policy-info').get_child_by_name('policy-name')
+ return_value = {
+ 'policy-name': export_policy
+ }
+ return return_value
+
+ def create_export_policy(self, uuid=None):
+ """
+ Creates an export policy
+ """
+ if self.use_rest:
+ params = {'name': self.parameters['name'],
+ 'svm.uuid': uuid}
+ api = 'protocols/nfs/export-policies'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating export policy: %s" % error)
+ else:
+ export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-create', **{'policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on creating export-policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_export_policy(self, policy_id=None):
+ """
+ Delete export-policy
+ """
+ if self.use_rest:
+ api = 'protocols/nfs/export-policies/' + str(policy_id)
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ self.module.fail_json(msg=" Error on deleting export policy: %s" % error)
+ else:
+ export_policy_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-destroy', **{'policy-name': self.parameters['name'], })
+ try:
+ self.server.invoke_successfully(export_policy_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on deleting export-policy %s: %s'
+ % (self.parameters['name'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def rename_export_policy(self, policy_id=None):
+ """
+ Rename the export-policy.
+ """
+ if self.use_rest:
+ params = {'name': self.parameters['name']}
+ api = 'protocols/nfs/export-policies/' + str(policy_id)
+ dummy, error = self.rest_api.patch(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on renaming export policy: %s" % error)
+ else:
+ export_policy_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-rename', **{'policy-name': self.parameters['from_name'],
+ 'new-policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on renaming export-policy %s:%s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_export_policy_id(self, name=None):
+ """
+ Get a export policy's id
+ :return: id of the export policy
+ """
+ if name is None:
+ name = self.parameters['name']
+
+ params = {'fields': 'id',
+ 'svm.name': self.parameters['vserver'],
+ 'name': name
+ }
+ api = 'protocols/nfs/export-policies'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ if message['num_records'] == 0:
+ return None
+ else:
+ return message['records'][0]['id']
+
+ def get_export_policy_svm_uuid(self):
+ """
+ Get a svm's uuid
+ :return: uuid of the svm
+ """
+ params = {'svm.name': self.parameters['vserver']}
+ api = 'protocols/nfs/export-policies'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ return message['records'][0]['svm']['uuid']
+
+ def apply(self):
+ """
+ Apply action to export-policy
+ """
+ policy_id, uuid = None, None
+ cd_action, rename = None, None
+
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_export_policy", self.server)
+ if self.use_rest:
+ uuid = self.get_export_policy_svm_uuid()
+ if self.parameters.get('from_name'):
+ policy_id = self.get_export_policy_id(self.parameters['from_name'])
+ else:
+ policy_id = self.get_export_policy_id()
+
+ current = self.get_export_policy(uuid=uuid)
+
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_export_policy(self.parameters['from_name']), current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming: export policy %s does not exist" % self.parameters['from_name'])
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_export_policy(policy_id=policy_id)
+ elif cd_action == 'create':
+ self.create_export_policy(uuid=uuid)
+ elif cd_action == 'delete':
+ self.delete_export_policy(policy_id=policy_id)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action
+ """
+ export_policy = NetAppONTAPExportPolicy()
+ export_policy.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py
new file mode 100644
index 00000000..7b0b0acc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+
+# (c) 2018-2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_export_policy_rule
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_export_policy_rule
+
+short_description: NetApp ONTAP manage export policy rules
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete or modify export rules in ONTAP
+
+options:
+ state:
+ description:
+ - Whether the specified export policy rule should exist or not.
+ required: false
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the export policy this rule will be added to (or modified, or removed from).
+ required: True
+ type: str
+ aliases:
+ - policy_name
+
+ client_match:
+ description:
+ - List of Client Match host names, IP Addresses, Netgroups, or Domains
+ - If rule_index is not provided, client_match is used as a key to fetch current rule to determine create,delete,modify actions.
+ If a rule with provided client_match exists, a new rule will not be created, but the existing rule will be modified or deleted.
+ If a rule with provided client_match doesn't exist, a new rule will be created if state is present.
+ type: list
+ elements: str
+
+ anonymous_user_id:
+ description:
+ - User name or ID to which anonymous users are mapped. Default value is '65534'.
+ type: int
+
+ ro_rule:
+ description:
+ - List of Read only access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ rw_rule:
+ description:
+ - List of Read Write access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ super_user_security:
+ description:
+    - List of Super User access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ allow_suid:
+ description:
+ - If 'true', NFS server will honor SetUID bits in SETATTR operation. Default value on creation is 'true'
+ type: bool
+
+ protocol:
+ description:
+ - List of Client access protocols.
+ - Default value is set to 'any' during create.
+ choices: [any,nfs,nfs3,nfs4,cifs,flexcache]
+ type: list
+ elements: str
+
+ rule_index:
+ description:
+ - index of the export policy rule
+ type: int
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Create ExportPolicyRule
+ na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ vserver: ci_dev
+ client_match: 0.0.0.0/0,1.1.1.0/24
+ ro_rule: krb5,krb5i
+ rw_rule: any
+ protocol: nfs,nfs3
+ super_user_security: any
+ anonymous_user_id: 65534
+ allow_suid: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify ExportPolicyRule
+ na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ rule_index: 100
+ client_match: 0.0.0.0/0
+ anonymous_user_id: 65521
+ ro_rule: ntlm
+ rw_rule: any
+ protocol: any
+ allow_suid: false
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete ExportPolicyRule
+ na_ontap_export_policy_rule:
+ state: absent
+ name: default123
+ rule_index: 100
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppontapExportRule(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', aliases=['policy_name']),
+ protocol=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'nfs', 'nfs3', 'nfs4', 'cifs', 'flexcache']),
+ client_match=dict(required=False, type='list', elements='str'),
+ ro_rule=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+ rw_rule=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+ super_user_security=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+ allow_suid=dict(required=False, type='bool'),
+ rule_index=dict(required=False, type='int'),
+ anonymous_user_id=dict(required=False, type='int'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+ self.na_helper.zapi_string_keys = {
+ 'client_match': 'client-match',
+ 'name': 'policy-name'
+ }
+ self.na_helper.zapi_list_keys = {
+ 'protocol': ('protocol', 'access-protocol'),
+ 'ro_rule': ('ro-rule', 'security-flavor'),
+ 'rw_rule': ('rw-rule', 'security-flavor'),
+ 'super_user_security': ('super-user-security', 'security-flavor'),
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'allow_suid': 'is-allow-set-uid-enabled'
+ }
+ self.na_helper.zapi_int_keys = {
+ 'rule_index': 'rule-index',
+ 'anonymous_user_id': 'anonymous-user-id'
+
+ }
+
+ def set_query_parameters(self):
+ """
+ Return dictionary of query parameters and
+ :return:
+ """
+ query = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+
+ if self.parameters.get('rule_index'):
+ query['rule-index'] = self.parameters['rule_index']
+ elif self.parameters.get('client_match'):
+ query['client-match'] = self.parameters['client_match']
+ else:
+ self.module.fail_json(
+ msg="Need to specify at least one of the rule_index and client_match option.")
+
+ attributes = {
+ 'query': {
+ 'export-rule-info': query
+ }
+ }
+ return attributes
+
+ def get_export_policy_rule(self):
+ """
+ Return details about the export policy rule
+ :param:
+ name : Name of the export_policy
+ :return: Details about the export_policy. None if not found.
+ :rtype: dict
+ """
+ current, result = None, None
+ rule_iter = netapp_utils.zapi.NaElement('export-rule-get-iter')
+ rule_iter.translate_struct(self.set_query_parameters())
+ try:
+ result = self.server.invoke_successfully(rule_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting export policy rule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result is not None and \
+ result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ current = dict()
+ rule_info = result.get_child_by_name('attributes-list').get_child_by_name('export-rule-info')
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ current[item_key] = rule_info.get_child_content(zapi_key)
+ for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+ current[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+ value=rule_info[zapi_key])
+ for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+ current[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=rule_info[zapi_key])
+ for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
+ parent, dummy = zapi_key
+ current[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
+ zapi_parent=rule_info.get_child_by_name(parent))
+ current['num_records'] = int(result.get_child_content('num-records'))
+ if not self.parameters.get('rule_index'):
+ self.parameters['rule_index'] = current['rule_index']
+ return current
+
+ def get_export_policy(self):
+ """
+ Return details about the export-policy
+ :param:
+ name : Name of the export-policy
+
+ :return: Details about the export-policy. None if not found.
+ :rtype: dict
+ """
+ export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter')
+ attributes = {
+ 'query': {
+ 'export-policy-info': {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+ }
+ }
+
+ export_policy_iter.translate_struct(attributes)
+ try:
+ result = self.server.invoke_successfully(export_policy_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting export policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ return result
+
+ return None
+
+ def add_parameters_for_create_or_modify(self, na_element_object, values):
+ """
+ Add children node for create or modify NaElement object
+ :param na_element_object: modify or create NaElement object
+ :param values: dictionary of cron values to be added
+ :return: None
+ """
+ for key in values:
+ if key in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(key)
+ na_element_object[zapi_key] = values[key]
+ elif key in self.na_helper.zapi_list_keys:
+ parent_key, child_key = self.na_helper.zapi_list_keys.get(key)
+ na_element_object.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent=parent_key,
+ zapi_child=child_key,
+ data=values[key]))
+ elif key in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(key)
+ na_element_object[zapi_key] = self.na_helper.get_value_for_int(from_zapi=False,
+ value=values[key])
+ elif key in self.na_helper.zapi_bool_keys:
+ zapi_key = self.na_helper.zapi_bool_keys.get(key)
+ na_element_object[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+ value=values[key])
+
+ def create_export_policy_rule(self):
+ """
+ create rule for the export policy.
+ """
+ for key in ['client_match', 'ro_rule', 'rw_rule']:
+ if self.parameters.get(key) is None:
+ self.module.fail_json(msg='Error: Missing required param for creating export policy rule %s' % key)
+ export_rule_create = netapp_utils.zapi.NaElement('export-rule-create')
+ self.add_parameters_for_create_or_modify(export_rule_create, self.parameters)
+ try:
+ self.server.invoke_successfully(export_rule_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating export policy rule %s: %s'
+ % (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+ def create_export_policy(self):
+ """
+ Creates an export policy
+ """
+ export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-create', **{'policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating export-policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_export_policy_rule(self, rule_index):
+ """
+ delete rule for the export policy.
+ """
+ export_rule_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-rule-destroy', **{'policy-name': self.parameters['name'],
+ 'rule-index': str(rule_index)})
+
+ try:
+ self.server.invoke_successfully(export_rule_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting export policy rule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_export_policy_rule(self, params):
+ '''
+ Modify an existing export policy rule
+ :param params: dict() of attributes with desired values
+ :return: None
+ '''
+ export_rule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-rule-modify', **{'policy-name': self.parameters['name'],
+ 'rule-index': str(self.parameters['rule_index'])})
+ self.add_parameters_for_create_or_modify(export_rule_modify, params)
+ try:
+ self.server.invoke_successfully(export_rule_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying allow_suid %s: %s'
+ % (self.parameters['allow_suid'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ netapp_utils.ems_log_event("na_ontap_export_policy_rules", self.server)
+
+ def apply(self):
+ ''' Apply required action from the play'''
+ self.autosupport_log()
+ # convert client_match list to comma-separated string
+ if self.parameters.get('client_match') is not None:
+ self.parameters['client_match'] = ','.join(self.parameters['client_match'])
+ self.parameters['client_match'] = self.parameters['client_match'].replace(' ', '')
+
+ current, modify = self.get_export_policy_rule(), None
+ action = self.na_helper.get_cd_action(current, self.parameters)
+ if action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ # create export policy (if policy doesn't exist) only when changed=True
+ if action == 'create':
+ if not self.get_export_policy():
+ self.create_export_policy()
+ self.create_export_policy_rule()
+ elif action == 'delete':
+ if current['num_records'] > 1:
+ self.module.fail_json(msg='Multiple export policy rules exist.'
+ 'Please specify a rule_index to delete')
+ self.delete_export_policy_rule(current['rule_index'])
+ elif modify:
+ self.modify_export_policy_rule(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    ''' Create the export policy rule object and call apply '''
+    rule_obj = NetAppontapExportRule()
+    rule_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py
new file mode 100644
index 00000000..d0edd545
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_fcp
+short_description: NetApp ONTAP Start, Stop and Enable FCP services.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Start, Stop and Enable FCP services.
+options:
+ state:
+ description:
+ - Whether the FCP should be enabled or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ status:
+ description:
+ - Whether the FCP should be up or down
+ choices: ['up', 'down']
+ type: str
+ default: up
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: create FCP
+ na_ontap_fcp:
+ state: present
+ status: down
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ vserver: "{{vservername}}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapFCP(object):
+ """
+ Enable and Disable FCP
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ status=dict(required=False, type='str', choices=['up', 'down'], default='up')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def create_fcp(self):
+ """
+ Create's and Starts an FCP
+ :return: none
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-create'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating FCP: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def start_fcp(self):
+ """
+ Starts an existing FCP
+ :return: none
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-start'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 13013 denotes fcp service already started.
+ if to_native(error.code) == "13013":
+ return None
+ else:
+ self.module.fail_json(msg='Error starting FCP %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def stop_fcp(self):
+ """
+ Steps an Existing FCP
+ :return: none
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-stop'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Stoping FCP %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def destroy_fcp(self):
+ """
+ Destroys an already stopped FCP
+ :return:
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-destroy'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error destroying FCP %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_fcp(self):
+ fcp_obj = netapp_utils.zapi.NaElement('fcp-service-get-iter')
+ fcp_info = netapp_utils.zapi.NaElement('fcp-service-info')
+ fcp_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(fcp_info)
+ fcp_obj.add_child_elem(query)
+ result = self.server.invoke_successfully(fcp_obj, True)
+ # There can only be 1 FCP per vserver. If true, one is set up, else one isn't set up
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+ return True
+ else:
+ return False
+
+ def current_status(self):
+ try:
+ status = self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-status'), True)
+ return status.get_child_content('is-available') == 'true'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error destroying FCP: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_fcp", cserver)
+ exists = self.get_fcp()
+ changed = False
+ if self.parameters['state'] == 'present':
+ if exists:
+ if self.parameters['status'] == 'up':
+ if not self.current_status():
+ if not self.module.check_mode:
+ self.start_fcp()
+ changed = True
+ else:
+ if self.current_status():
+ if not self.module.check_mode:
+ self.stop_fcp()
+ changed = True
+ else:
+ if not self.module.check_mode:
+ self.create_fcp()
+ if self.parameters['status'] == 'up':
+ self.start_fcp()
+ elif self.parameters['status'] == 'down':
+ self.stop_fcp()
+ changed = True
+ else:
+ if exists:
+ if not self.module.check_mode:
+ if self.current_status():
+ self.stop_fcp()
+ self.destroy_fcp()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+    """
+    Entry point: start, stop, enable or disable the FCP service.
+    """
+    obj = NetAppOntapFCP()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py
new file mode 100644
index 00000000..e56ceeef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+
+module: na_ontap_file_directory_policy
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete, or modify vserver security file-directory policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.8.0
+description:
+ - Create, modify, or destroy vserver security file-directory policy
+ - Add or remove task from policy.
+ - Each time a policy/task is created/modified, automatically apply policy to vserver.
+
+options:
+ state:
+ description:
+ - Whether the specified policy or task should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the policy.
+ required: true
+ type: str
+
+ policy_name:
+ description:
+ - Specifies the name of the policy.
+ type: str
+ required: true
+
+ access_control:
+ description:
+ - Specifies the access control of task to be applied.
+ choices: ['file_directory', 'slag']
+ type: str
+
+ ntfs_mode:
+ description:
+ - Specifies NTFS Propagation Mode.
+ choices: ['propagate', 'ignore', 'replace']
+ type: str
+
+ ntfs_sd:
+ description:
+ - Specifies NTFS security descriptor identifier.
+ type: list
+ elements: str
+
+ path:
+ description:
+ - Specifies the file or folder path of the task.
+ - If path is specified and the policy which the task is adding to, does not exist, it will create the policy first then add the task to it.
+ - If path is specified, delete operation only removes task from policy.
+ type: str
+
+ security_type:
+ description:
+ - Specifies the type of security.
+ type: str
+ choices: ['ntfs', 'nfsv4']
+
+ ignore_broken_symlinks:
+ description:
+ - Skip Broken Symlinks.
+ - Options used when applying the policy to vserver.
+ type: bool
+
+"""
+
+EXAMPLES = """
+
+ - name: create policy
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ vserver: ansible
+ policy_name: file_policy
+ ignore_broken_symlinks: false
+
+ - name: add task to existing file_policy
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ vserver: ansible
+ policy_name: file_policy
+ path: /vol
+ ntfs_sd: ansible_sd
+ ntfs_mode: propagate
+
+ - name: delete task from file_policy.
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ vserver: ansible
+ policy_name: file_policy
+ path: /vol
+
+ - name: delete file_policy along with the tasks.
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ vserver: ansible
+ policy_name: file_policy
+
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapFilePolicy(object):
+
+ def __init__(self):
+ """
+ Initialize the Ontap file directory policy class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
+ ntfs_mode=dict(required=False, choices=['propagate', 'ignore', 'replace']),
+ ntfs_sd=dict(required=False, type='list', elements='str'),
+ path=dict(required=False, type='str'),
+ security_type=dict(required=False, type='str', choices=['ntfs', 'nfsv4']),
+ ignore_broken_symlinks=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def create_policy(self):
+ policy_obj = netapp_utils.zapi.NaElement("file-directory-security-policy-create")
+ policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error creating file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_policy_iter(self):
+ policy_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-get-iter')
+ policy_info = netapp_utils.zapi.NaElement('file-directory-security-policy')
+ policy_info.add_new_child('vserver', self.parameters['vserver'])
+ policy_info.add_new_child('policy-name', self.parameters['policy_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(policy_info)
+ policy_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(policy_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ policy = attributes_list.get_child_by_name('file-directory-security-policy')
+ return policy.get_child_content('policy-name')
+ return None
+
+ def remove_policy(self):
+ remove_policy = netapp_utils.zapi.NaElement('file-directory-security-policy-delete')
+ remove_policy.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(remove_policy, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error removing file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_task_iter(self):
+ task_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-task-get-iter')
+ task_info = netapp_utils.zapi.NaElement('file-directory-security-policy-task')
+ task_info.add_new_child('vserver', self.parameters['vserver'])
+ task_info.add_new_child('policy-name', self.parameters['policy_name'])
+ task_info.add_new_child('path', self.parameters['path'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(task_info)
+ task_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(task_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching task from file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ task = attributes_list.get_child_by_name('file-directory-security-policy-task')
+ task_result = dict()
+ task_result['path'] = task.get_child_content('path')
+ if task.get_child_by_name('ntfs-mode'):
+ task_result['ntfs_mode'] = task.get_child_content('ntfs-mode')
+ if task.get_child_by_name('security-type'):
+ task_result['security_type'] = task.get_child_content('security-type')
+ if task.get_child_by_name('ntfs-sd'):
+ task_result['ntfs_sd'] = [ntfs_sd.get_content() for ntfs_sd in task.get_child_by_name('ntfs-sd').get_children()]
+ return task_result
+ return None
+
+ def add_task_to_policy(self):
+ policy_add_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-add')
+ policy_add_task.add_new_child('path', self.parameters['path'])
+ policy_add_task.add_new_child('policy-name', self.parameters['policy_name'])
+ if self.parameters.get('access_control') is not None:
+ policy_add_task.add_new_child('access-control', self.parameters['access_control'])
+ if self.parameters.get('ntfs_mode') is not None:
+ policy_add_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
+ if self.parameters.get('ntfs_sd') is not None:
+ ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
+ for ntfs_sd in self.parameters['ntfs_sd']:
+ ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
+ policy_add_task.add_child_elem(ntfs_sds)
+ if self.parameters.get('security_type') is not None:
+ policy_add_task.add_new_child('security-type', self.parameters['security_type'])
+ try:
+ self.server.invoke_successfully(policy_add_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding task to file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_task_from_policy(self):
+ policy_remove_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-remove')
+ policy_remove_task.add_new_child('path', self.parameters['path'])
+ policy_remove_task.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(policy_remove_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing task from file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_task(self, modify):
+ policy_modify_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-modify')
+ policy_modify_task.add_new_child('path', self.parameters['path'])
+ policy_modify_task.add_new_child('policy-name', self.parameters['policy_name'])
+ if modify.get('ntfs_mode') is not None:
+ policy_modify_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
+ if modify.get('ntfs_sd') is not None:
+ ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
+ for ntfs_sd in self.parameters['ntfs_sd']:
+ ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
+ policy_modify_task.add_child_elem(ntfs_sds)
+ if modify.get('security_type') is not None:
+ policy_modify_task.add_new_child('security-type', self.parameters['security_type'])
+ try:
+ self.server.invoke_successfully(policy_modify_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying task in file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def set_sd(self):
+ set_sd = netapp_utils.zapi.NaElement('file-directory-security-set')
+ set_sd.add_new_child('policy-name', self.parameters['policy_name'])
+ if self.parameters.get('ignore-broken-symlinks'):
+ set_sd.add_new_child('ignore-broken-symlinks', str(self.parameters['ignore_broken_symlinks']))
+ try:
+ self.server.invoke_successfully(set_sd, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error applying file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_file_directory_policy", self.server)
+ current = self.get_policy_iter()
+ cd_action, task_cd_action, task_modify = None, None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters.get('path'):
+ current_task = self.get_task_iter()
+ task_cd_action = self.na_helper.get_cd_action(current_task, self.parameters)
+ if task_cd_action is None and self.parameters['state'] == 'present':
+ task_modify = self.na_helper.get_modified_attributes(current_task, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.parameters.get('path'):
+ if task_cd_action == 'create':
+ # if policy doesn't exist, create the policy first.
+ if cd_action == 'create':
+ self.create_policy()
+ self.add_task_to_policy()
+ self.set_sd()
+ elif task_cd_action == 'delete':
+ # delete the task, not the policy.
+ self.remove_task_from_policy()
+ elif task_modify:
+ self.modify_task(task_modify)
+ self.set_sd()
+ else:
+ if cd_action == 'create':
+ self.create_policy()
+ self.set_sd()
+ elif cd_action == 'delete':
+ self.remove_policy()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """
+    Creates, deletes and modifies file directory policy and its tasks
+    """
+    obj = NetAppOntapFilePolicy()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py
new file mode 100644
index 00000000..3192183a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py
@@ -0,0 +1,366 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_firewall_policy
+short_description: NetApp ONTAP Manage a firewall policy
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Configure firewall on an ONTAP node and manage firewall policy for an ONTAP SVM
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+requirements:
+ - Python package ipaddress. Install using 'pip install ipaddress'
+options:
+ state:
+ description:
+ - Whether to set up a firewall policy or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ allow_list:
+ description:
+ - A list of IPs and masks to use.
+ - The host bits of the IP addresses used in this list must be set to 0.
+ type: list
+ elements: str
+ policy:
+ description:
+ - A policy name for the firewall policy
+ type: str
+ service:
+ description:
+ - The service to apply the policy to
+ - https and ssh are not supported starting with ONTAP 9.6
+ - portmap is supported for ONTAP 9.4, 9.5 and 9.6
+ choices: ['dns', 'http', 'https', 'ndmp', 'ndmps', 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet']
+ type: str
+ vserver:
+ description:
+ - The Vserver to apply the policy to.
+ type: str
+ enable:
+ description:
+ - enable firewall on a node
+ choices: ['enable', 'disable']
+ type: str
+ logging:
+ description:
+ - enable logging for firewall on a node
+ choices: ['enable', 'disable']
+ type: str
+ node:
+ description:
+ - The node to run the firewall configuration on
+ type: str
+'''
+
+EXAMPLES = """
+ - name: create firewall Policy
+ na_ontap_firewall_policy:
+ state: present
+ allow_list: [1.2.3.0/24,1.3.0.0/16]
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+ - name: Modify firewall Policy
+ na_ontap_firewall_policy:
+ state: present
+ allow_list: [1.5.3.0/24]
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+  - name: Destroy firewall Policy
+ na_ontap_firewall_policy:
+ state: absent
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+ - name: Enable firewall and logging on a node
+ na_ontap_firewall_policy:
+ node: test-vsim1
+ enable: enable
+ logging: enable
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+try:
+ import ipaddress
+ HAS_IPADDRESS_LIB = True
+except ImportError:
+ HAS_IPADDRESS_LIB = False
+
+import sys
+# Python 3 merged unicode in to str, this is to make sure nothing breaks
+# https://stackoverflow.com/questions/19877306/nameerror-global-name-unicode-is-not-defined-in-python-3
+if sys.version_info[0] >= 3:
+ unicode = str
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPFirewallPolicy(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ allow_list=dict(required=False, type='list', elements='str'),
+ policy=dict(required=False, type='str'),
+ service=dict(required=False, type='str', choices=['dns', 'http', 'https', 'ndmp', 'ndmps',
+ 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet']),
+ vserver=dict(required=False, type="str"),
+ enable=dict(required=False, type="str", choices=['enable', 'disable']),
+ logging=dict(required=False, type="str", choices=['enable', 'disable']),
+ node=dict(required=False, type="str")
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=(['policy', 'service', 'vserver'],
+ ['enable', 'node']
+ ),
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ if HAS_IPADDRESS_LIB is False:
+ self.module.fail_json(msg="the python ipaddress lib is required for this module")
+ return
+
+ def validate_ip_addresses(self):
+ '''
+ Validate if the given IP address is a network address (i.e. it's host bits are set to 0)
+ ONTAP doesn't validate if the host bits are set,
+ and hence doesn't add a new address unless the IP is from a different network.
+ So this validation allows the module to be idempotent.
+ :return: None
+ '''
+ for ip in self.parameters['allow_list']:
+ # create an IPv4 object for current IP address
+ if sys.version_info[0] >= 3:
+ ip_addr = str(ip)
+ else:
+ ip_addr = unicode(ip) # pylint: disable=undefined-variable
+ # get network address from netmask, throw exception if address is not a network address
+ try:
+ ipaddress.ip_network(ip_addr)
+ except ValueError as exc:
+ self.module.fail_json(msg='Error: Invalid IP address value for allow_list parameter.'
+ 'Please specify a network address without host bits set: %s'
+ % (to_native(exc)))
+
+ def get_firewall_policy(self):
+ """
+ Get a firewall policy
+ :return: returns a firewall policy object, or returns False if there are none
+ """
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-get-iter")
+ attributes = {
+ 'query': {
+ 'net-firewall-policy-info': self.firewall_policy_attributes()
+ }
+ }
+ net_firewall_policy_obj.translate_struct(attributes)
+
+ try:
+ result = self.server.invoke_successfully(net_firewall_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error getting firewall policy %s:%s" % (self.parameters['policy'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ policy_info = attributes_list.get_child_by_name('net-firewall-policy-info')
+ ips = self.na_helper.get_value_for_list(from_zapi=True,
+ zapi_parent=policy_info.get_child_by_name('allow-list'))
+ return {
+ 'service': policy_info['service'],
+ 'allow_list': ips}
+ return None
+
+ def create_firewall_policy(self):
+ """
+ Create a firewall policy for given vserver
+ :return: None
+ """
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-create")
+ net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+ if self.parameters.get('allow_list'):
+ self.validate_ip_addresses()
+ net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent='allow-list',
+ zapi_child='ip-and-mask',
+ data=self.parameters['allow_list'])
+ )
+ try:
+ self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+ def destroy_firewall_policy(self):
+ """
+ Destroy a Firewall Policy from a vserver
+ :return: None
+ """
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-destroy")
+ net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+ try:
+ self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error destroying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+ def modify_firewall_policy(self, modify):
+ """
+ Modify a firewall Policy on a vserver
+ :return: none
+ """
+ self.validate_ip_addresses()
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-modify")
+ net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+ net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent='allow-list',
+ zapi_child='ip-and-mask',
+ data=modify['allow_list']))
+ try:
+ self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modifying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+ def firewall_policy_attributes(self):
+ return {
+ 'policy': self.parameters['policy'],
+ 'service': self.parameters['service'],
+ 'vserver': self.parameters['vserver'],
+ }
+
+ def get_firewall_config_for_node(self):
+ """
+ Get firewall configuration on the node
+ :return: dict() with firewall config details
+ """
+ if self.parameters.get('logging'):
+ if self.parameters.get('node') is None:
+ self.module.fail_json(msg='Error: Missing parameter \'node\' to modify firewall logging')
+ net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-get")
+ net_firewall_config_obj.add_new_child('node-name', self.parameters['node'])
+ try:
+ result = self.server.invoke_successfully(net_firewall_config_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error getting Firewall Configuration: %s" % (to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('attributes'):
+ firewall_info = result['attributes'].get_child_by_name('net-firewall-config-info')
+ return {'enable': self.change_status_to_bool(firewall_info.get_child_content('is-enabled'), to_zapi=False),
+ 'logging': self.change_status_to_bool(firewall_info.get_child_content('is-logging'), to_zapi=False)}
+ return None
+
+ def modify_firewall_config(self, modify):
+ """
+ Modify the configuration of a firewall on node
+ :return: None
+ """
+ net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-modify")
+ net_firewall_config_obj.add_new_child('node-name', self.parameters['node'])
+ if modify.get('enable'):
+ net_firewall_config_obj.add_new_child('is-enabled', self.change_status_to_bool(self.parameters['enable']))
+ if modify.get('logging'):
+ net_firewall_config_obj.add_new_child('is-logging', self.change_status_to_bool(self.parameters['logging']))
+ try:
+ self.server.invoke_successfully(net_firewall_config_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modifying Firewall Config: %s" % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def change_status_to_bool(self, input, to_zapi=True):
+ if to_zapi:
+ return 'true' if input == 'enable' else 'false'
+ else:
+ return 'enable' if input == 'true' else 'disable'
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_firewall_policy", cserver)
+
+ def apply(self):
+ self.autosupport_log()
+ cd_action, modify, modify_config = None, None, None
+ if self.parameters.get('policy'):
+ current = self.get_firewall_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.parameters.get('node'):
+ current_config = self.get_firewall_config_for_node()
+ # firewall config for a node is always present, we cannot create or delete a firewall on a node
+ modify_config = self.na_helper.get_modified_attributes(current_config, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_firewall_policy()
+ elif cd_action == 'delete':
+ self.destroy_firewall_policy()
+ else:
+ if modify:
+ self.modify_firewall_policy(modify)
+ if modify_config:
+ self.modify_firewall_config(modify_config)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ :return: nothing
+ """
+ cg_obj = NetAppONTAPFirewallPolicy()
+ cg_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py
new file mode 100644
index 00000000..0288506c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py
@@ -0,0 +1,737 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+  - Update ONTAP service-processor firmware
+  - The recommended procedure is to
+ 1. download the firmware package from the NetApp Support site
+ 2. copy the package to a web server
+ 3. download the package from the web server using this module
+ - Once a disk qualification, disk, shelf, or ACP firmware package is downloaded, ONTAP will automatically update the related resources in background.
+ - It may take some time to complete.
+ - For service processor, the update requires a node reboot to take effect.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_firmware_upgrade
+options:
+ state:
+ description:
+ - Whether the specified ONTAP firmware should be upgraded or not.
+ default: present
+ type: str
+ node:
+ description:
+ - Node on which the device is located.
+ - Not required if package_url is present and force_disruptive_update is False.
+ - If this option is not given, the firmware will be downloaded on all nodes in the cluster,
+ - and the resources will be updated in background on all nodes, except for service processor.
+ - For service processor, the upgrade will happen automatically when each node is rebooted.
+ type: str
+ clear_logs:
+ description:
+ - Clear logs on the device after update. Default value is true.
+ - Not used if force_disruptive_update is False.
+ type: bool
+ default: true
+ package:
+ description:
+ - Name of the package file containing the firmware to be installed. Not required when -baseline is true.
+ - Not used if force_disruptive_update is False.
+ type: str
+ package_url:
+ description:
+ - URL of the package file containing the firmware to be downloaded.
+ - Once the package file is downloaded to a node, the firmware update will happen automatically in background.
+ - For SP, the upgrade will happen automatically when a node is rebooted.
+ - For SP, the upgrade will happen automatically if autoupdate is enabled (which is the recommended setting).
+ version_added: "20.4.1"
+ type: str
+ force_disruptive_update:
+ description:
+ - If set to C(False), and URL is given, the upgrade is non disruptive. If URL is not given, no operation is performed.
+ - Do not set this to C(True), unless directed by NetApp Tech Support.
+ - It will force an update even if the resource is not ready for it, and can be disruptive.
+ type: bool
+ version_added: "20.4.1"
+ default: False
+ shelf_module_fw:
+ description:
+ - Shelf module firmware to be updated to.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: str
+ disk_fw:
+ description:
+ - disk firmware to be updated to.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: str
+ update_type:
+ description:
+ - Type of firmware update to be performed. Options include serial_full, serial_differential, network_full.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: str
+ install_baseline_image:
+ description:
+ - Install the version packaged with ONTAP if this parameter is set to true. Otherwise, package must be used to specify the package to install.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: bool
+ default: false
+ firmware_type:
+ description:
+ - Type of firmware to be upgraded. Options include shelf, ACP, service-processor, and disk.
+ - For shelf firmware upgrade the operation is asynchronous, and therefore returns no errors that might occur during the download process.
+ - Shelf firmware upgrade is idempotent if shelf_module_fw is provided .
+ - disk firmware upgrade is idempotent if disk_fw is provided .
+ - With check mode, SP, ACP, disk, and shelf firmware upgrade is not idempotent.
+ - This operation will only update firmware on shelves/disk that do not have the latest firmware-revision.
+ - Not used if force_disruptive_update is False (ONTAP will automatically detect the firmware type)
+ choices: ['service-processor', 'shelf', 'acp', 'disk']
+ type: str
+ fail_on_502_error:
+ description:
+ - The firmware download may take time if the web server is slow and if there are many nodes in the cluster.
+ - ONTAP will break the ZAPI connection after 5 minutes with a 502 Bad Gateway error, even though the download \
+is still happening.
+ - By default, this module ignores this error and assumes the download is progressing as ONTAP does not \
+provide a way to check the status.
+ - When setting this option to true, the module will report 502 as an error.
+ type: bool
+ default: false
+ version_added: "20.6.0"
+ rename_package:
+ description:
+ - Rename the package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: str
+ version_added: "20.6.1"
+ replace_package:
+ description:
+ - Replace the local package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: bool
+ version_added: "20.6.1"
+ reboot_sp:
+ description:
+ - Reboot service processor before downloading package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: bool
+ default: true
+ version_added: "20.6.1"
+short_description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
+version_added: 2.9.0
+'''
+
+EXAMPLES = """
+
+ - name: firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: firmware upgrade, confirm successful download
+ na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ fail_on_502_error: true
+ - name: SP firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package: "{{ file name }}"
+ package_url: "{{ web_link }}"
+ clear_logs: True
+ install_baseline_image: False
+ update_type: serial_full
+ force_disruptive_update: False
+ firmware_type: service-processor
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: SP firmware download replace package
+ tags:
+ - sp_download
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+ replace_package: true
+ reboot_sp: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ - name: SP firmware download rename package
+ tags:
+ - sp_download
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+ rename_package: SP_FW.zip
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ - name: ACP firmware download and upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ firmware_type: acp
+ force_disruptive_update: False
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: shelf firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ firmware_type: shelf
+ shelf_module_fw: 1221
+ force_disruptive_update: False
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: disk firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ firmware_type: disk
+ disk_fw: NA02
+ force_disruptive_update: False
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+msg:
+ description: Returns additional information in case of success.
+ returned: always
+ type: str
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import time
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+MSGS = dict(
+ no_action='No action taken.',
+ dl_completed='Firmware download completed.',
+ dl_completed_slowly='Firmware download completed, slowly.',
+ dl_in_progress='Firmware download still in progress.'
+)
+
+
+class NetAppONTAPFirmwareUpgrade(object):
+ """
+ Class with ONTAP firmware upgrade methods
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present'),
+ node=dict(required=False, type='str'),
+ firmware_type=dict(required=False, type='str', choices=['service-processor', 'shelf', 'acp', 'disk']),
+ clear_logs=dict(required=False, type='bool', default=True),
+ package=dict(required=False, type='str'),
+ install_baseline_image=dict(required=False, type='bool', default=False),
+ update_type=dict(required=False, type='str'),
+ shelf_module_fw=dict(required=False, type='str'),
+ disk_fw=dict(required=False, type='str'),
+ package_url=dict(required=False, type='str'),
+ force_disruptive_update=dict(required=False, type='bool', default=False),
+ fail_on_502_error=dict(required=False, type='bool', default=False),
+ rename_package=dict(required=False, type='str'),
+ replace_package=dict(required=False, type='bool'),
+ reboot_sp=dict(required=False, type='bool', default=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('firmware_type', 'acp', ['node']),
+ ('firmware_type', 'disk', ['node']),
+ ('firmware_type', 'service-processor', ['node']),
+ ('force_disruptive_update', True, ['firmware_type']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('firmware_type') == 'service-processor':
+ if self.parameters.get('install_baseline_image') and self.parameters.get('package') is not None:
+ self.module.fail_json(msg='Do not specify both package and install_baseline_image: true')
+ if not self.parameters.get('package') and self.parameters.get('install_baseline_image') == 'False':
+ self.module.fail_json(msg='Specify at least one of package or install_baseline_image')
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+ def firmware_image_get_iter(self):
+ """
+ Compose NaElement object to query current firmware version
+ :return: NaElement object for firmware_image_get_iter with query
+ """
+ firmware_image_get = netapp_utils.zapi.NaElement('service-processor-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ firmware_image_info = netapp_utils.zapi.NaElement('service-processor-info')
+ firmware_image_info.add_new_child('node', self.parameters['node'])
+ query.add_child_elem(firmware_image_info)
+ firmware_image_get.add_child_elem(query)
+ return firmware_image_get
+
+    def firmware_image_get(self, node_name):
+        """
+        Get the current service processor firmware version of the target node.
+
+        NOTE(review): node_name is currently unused; the node filter comes from
+        self.parameters['node'] via firmware_image_get_iter -- presumably callers
+        pass the same value, confirm before relying on the parameter.
+        :return: firmware version string if a record is found, else None
+        """
+        firmware_image_get_iter = self.firmware_image_get_iter()
+        try:
+            result = self.server.invoke_successfully(firmware_image_get_iter, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching firmware image details: %s: %s'
+                                  % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+        # return firmware image details
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            sp_info = result.get_child_by_name('attributes-list').get_child_by_name('service-processor-info')
+            firmware_version = sp_info.get_child_content('firmware-version')
+            return firmware_version
+        return None
+
+ def acp_firmware_required_get(self):
+ """
+ where acp firmware upgrade is required
+ :return: True is firmware upgrade is required else return None
+ """
+ acp_firmware_get_iter = netapp_utils.zapi.NaElement('storage-shelf-acp-module-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ acp_info = netapp_utils.zapi.NaElement('storage-shelf-acp-module')
+ query.add_child_elem(acp_info)
+ acp_firmware_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(acp_firmware_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching acp firmware details details: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-acp-module'):
+ acp_module_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'storage-shelf-acp-module')
+ state = acp_module_info.get_child_content('state')
+ if state == 'firmware_update_required':
+ # acp firmware version upgrade required
+ return True
+ return False
+
+    def sp_firmware_image_update_progress_get(self, node_name):
+        """
+        Get current firmware image update progress info for self.parameters['node'].
+
+        NOTE(review): node_name is currently unused; the node is taken from self.parameters.
+        :return: dict with 'is-in-progress' and 'node' keys when progress info is present,
+                 otherwise an empty dict
+        """
+        firmware_update_progress_get = netapp_utils.zapi.NaElement('service-processor-image-update-progress-get')
+        firmware_update_progress_get.add_new_child('node', self.parameters['node'])
+
+        firmware_update_progress_info = dict()
+        try:
+            result = self.server.invoke_successfully(firmware_update_progress_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching firmware image upgrade progress details: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        # return firmware image update progress details
+        # NOTE(review): assumes the reply always contains an 'attributes' element; if ONTAP can
+        # omit it, get_child_by_name returns None and this raises AttributeError -- confirm.
+        if result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info'):
+            update_progress_info = result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info')
+            firmware_update_progress_info['is-in-progress'] = update_progress_info.get_child_content('is-in-progress')
+            firmware_update_progress_info['node'] = update_progress_info.get_child_content('node')
+        return firmware_update_progress_info
+
+    def shelf_firmware_info_get(self):
+        """
+        Get the current firmware of shelf module
+        :return: dict mapping shelf module-id -> module-fw-revision (empty dict if none found)
+        """
+        shelf_id_fw_info = dict()
+        shelf_firmware_info_get = netapp_utils.zapi.NaElement('storage-shelf-info-get-iter')
+        # Restrict the reply to the shelf-module firmware fields via desired-attributes.
+        desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+        storage_shelf_info = netapp_utils.zapi.NaElement('storage-shelf-info')
+        shelf_module = netapp_utils.zapi.NaElement('shelf-modules')
+        shelf_module_info = netapp_utils.zapi.NaElement('storage-shelf-module-info')
+        shelf_module.add_child_elem(shelf_module_info)
+        storage_shelf_info.add_child_elem(shelf_module)
+        desired_attributes.add_child_elem(storage_shelf_info)
+        shelf_firmware_info_get.add_child_elem(desired_attributes)
+
+        try:
+            result = self.server.invoke_successfully(shelf_firmware_info_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching shelf module firmware details: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            # NOTE(review): only the first storage-shelf-info record is inspected -- confirm
+            # whether multiple shelf records can be returned and should be aggregated.
+            shelf_info = result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-info')
+            if (shelf_info.get_child_by_name('shelf-modules') and
+                    shelf_info.get_child_by_name('shelf-modules').get_child_by_name('storage-shelf-module-info')):
+                shelves = shelf_info['shelf-modules'].get_children()
+                for shelf in shelves:
+                    shelf_id_fw_info[shelf.get_child_content('module-id')] = shelf.get_child_content('module-fw-revision')
+        return shelf_id_fw_info
+
+    def disk_firmware_info_get(self):
+        """
+        Get the current firmware of disks module
+        :return: dict mapping disk-uid -> firmware-revision (empty dict if no disks found)
+        """
+        disk_id_fw_info = dict()
+        disk_firmware_info_get = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+        # Restrict the reply to disk-inventory-info via desired-attributes.
+        desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+        storage_disk_info = netapp_utils.zapi.NaElement('storage-disk-info')
+        disk_inv = netapp_utils.zapi.NaElement('disk-inventory-info')
+        storage_disk_info.add_child_elem(disk_inv)
+        desired_attributes.add_child_elem(storage_disk_info)
+        disk_firmware_info_get.add_child_elem(desired_attributes)
+        try:
+            result = self.server.invoke_successfully(disk_firmware_info_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching disk module firmware details: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            disk_info = result.get_child_by_name('attributes-list')
+            disks = disk_info.get_children()
+            for disk in disks:
+                disk_id_fw_info[disk.get_child_content('disk-uid')] = disk.get_child_by_name('disk-inventory-info').get_child_content('firmware-revision')
+        return disk_id_fw_info
+
+ def disk_firmware_required_get(self):
+ """
+ Check weather disk firmware upgrade is required or not
+ :return: True if the firmware upgrade is required
+ """
+ disk_firmware_info = self.disk_firmware_info_get()
+ for disk in disk_firmware_info:
+ if (disk_firmware_info[disk]) != self.parameters['disk_fw']:
+ return True
+ return False
+
+ def shelf_firmware_required_get(self):
+ """
+ Check weather shelf firmware upgrade is required or not
+ :return: True if the firmware upgrade is required
+ """
+ shelf_firmware_info = self.shelf_firmware_info_get()
+ for module in shelf_firmware_info:
+ if (shelf_firmware_info[module]) != self.parameters['shelf_module_fw']:
+ return True
+ return False
+
+    def sp_firmware_image_update(self):
+        """
+        Start the service processor firmware image update on self.parameters['node'].
+        :return: True if the update was started; False if ONTAP skipped it because the
+                 installed version already matches (ZAPI error 13001)
+        """
+        firmware_update_info = netapp_utils.zapi.NaElement('service-processor-image-update')
+        if self.parameters.get('package') is not None:
+            firmware_update_info.add_new_child('package', self.parameters['package'])
+        if self.parameters.get('clear_logs') is not None:
+            firmware_update_info.add_new_child('clear-logs', str(self.parameters['clear_logs']))
+        if self.parameters.get('install_baseline_image') is not None:
+            firmware_update_info.add_new_child('install-baseline-image', str(self.parameters['install_baseline_image']))
+        firmware_update_info.add_new_child('node', self.parameters['node'])
+        firmware_update_info.add_new_child('update-type', self.parameters['update_type'])
+
+        try:
+            self.server.invoke_successfully(firmware_update_info, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            # Current firmware version matches the version to be installed
+            # (error 13001 with a 'Service Processor update skipped' message is not a failure)
+            if to_native(error.code) == '13001' and (error.message.startswith('Service Processor update skipped')):
+                return False
+            self.module.fail_json(msg='Error updating firmware image for %s: %s'
+                                  % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+        return True
+
+ def shelf_firmware_upgrade(self):
+ """
+ Upgrade shelf firmware image
+ """
+ shelf_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-firmware-update')
+ try:
+ self.server.invoke_successfully(shelf_firmware_update_info, enable_tunneling=True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating shelf firmware image : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+
+ def acp_firmware_upgrade(self):
+
+ """
+ Upgrade shelf firmware image
+ """
+ acp_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-acp-firmware-update')
+ acp_firmware_update_info.add_new_child('node-name', self.parameters['node'])
+ try:
+ self.server.invoke_successfully(acp_firmware_update_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating acp firmware image : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+
+ def disk_firmware_upgrade(self):
+
+ """
+ Upgrade disk firmware
+ """
+ disk_firmware_update_info = netapp_utils.zapi.NaElement('disk-update-disk-fw')
+ disk_firmware_update_info.add_new_child('node-name', self.parameters['node'])
+ try:
+ self.server.invoke_successfully(disk_firmware_update_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating disk firmware image : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ return True
+
    def download_firmware(self):
        ''' calls the system-cli ZAPI as there is no ZAPI for this feature

        Runs ``storage firmware download`` at privilege level ``advanced``
        on the requested node (or on all nodes, '*', when no node is given)
        and interprets both the ZAPI error codes and the CLI output to
        distinguish completed, slow, and still-in-progress downloads.

        :return: one of the MSGS status strings, with the CLI output
            appended when available.
        '''
        msg = MSGS['dl_completed']
        command = ['storage', 'firmware', 'download', '-node', self.parameters['node'] if self.parameters.get('node') else '*',
                   '-package-url', self.parameters['package_url']]
        command_obj = netapp_utils.zapi.NaElement("system-cli")

        args_obj = netapp_utils.zapi.NaElement("args")
        for arg in command:
            args_obj.add_new_child('arg', arg)
        command_obj.add_child_elem(args_obj)
        # storage firmware download requires advanced privilege
        command_obj.add_new_child('priv', 'advanced')

        output = None
        try:
            output = self.server.invoke_successfully(command_obj, True)

        except netapp_utils.zapi.NaApiError as error:
            # with netapp_lib, error.code may be a number or a string
            try:
                err_num = int(error.code)
            except ValueError:
                err_num = -1
            if err_num == 60:  # API did not finish on time
                # even if the ZAPI reports a timeout error, it does it after the command completed
                msg = MSGS['dl_completed_slowly']
            elif err_num == 502 and not self.parameters['fail_on_502_error']:  # Bad Gateway
                # ONTAP proxy breaks the connection after 5 minutes, we can assume the download is progressing slowly
                msg = MSGS['dl_in_progress']
            else:
                self.module.fail_json(msg='Error running command %s: %s' % (command, to_native(error)),
                                      exception=traceback.format_exc())
        except netapp_utils.zapi.etree.XMLSyntaxError as error:
            self.module.fail_json(msg='Error decoding output from command %s: %s' % (command, to_native(error)),
                                  exception=traceback.format_exc())

        if output is not None:
            # command completed, check for success
            status = output.get_attr('status')
            cli_output = output.get_child_content('cli-output')
            if status is None or status != 'passed' or cli_output is None or cli_output == "":
                # build a hint about which part of the response was unexpected
                if status is None:
                    extra_info = "'status' attribute missing"
                elif status != 'passed':
                    extra_info = "check 'status' value"
                else:
                    extra_info = 'check console permissions'
                self.module.fail_json(msg='unable to download package from %s: %s. Received: %s' %
                                      (self.parameters['package_url'], extra_info, output.to_string()))

            if cli_output is not None:
                # the CLI can report failure in its text even when status is 'passed'
                if cli_output.startswith('Error:') or \
                   'Failed to download package from' in cli_output:
                    self.module.fail_json(msg='failed to download package from %s: %s' % (self.parameters['package_url'], cli_output))
                msg += " Extra info: %s" % cli_output

        return msg
+
+ def download_sp_image(self):
+ fetch_package = netapp_utils.zapi.NaElement('system-image-fetch-package')
+ fetch_package.add_new_child('node', self.parameters['node'])
+ fetch_package.add_new_child('package', self.parameters['package_url'])
+ if self.parameters.get('rename_package'):
+ fetch_package.add_new_child('rename-package', self.parameters['rename_package'])
+ if self.parameters.get('replace_package'):
+ fetch_package.add_new_child('replace-package', str(self.parameters['replace_package']))
+ try:
+ self.server.invoke_successfully(fetch_package, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching system image package from %s: %s'
+ % (self.parameters['package_url'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def download_sp_image_progress(self):
+ progress = netapp_utils.zapi.NaElement('system-image-update-progress-get')
+ progress.add_new_child('node', self.parameters['node'])
+ progress_info = dict()
+ try:
+ result = self.server.invoke_successfully(progress, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching system image package download progress: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('phase'):
+ progress_info['phase'] = result.get_child_content('phase')
+ else:
+ progress_info['phase'] = None
+ if result.get_child_by_name('exit-message'):
+ progress_info['exit_message'] = result.get_child_content('exit-message')
+ else:
+ progress_info['exit_message'] = None
+ if result.get_child_by_name('exit-status'):
+ progress_info['exit_status'] = result.get_child_content('exit-status')
+ else:
+ progress_info['exit_status'] = None
+ if result.get_child_by_name('last-message'):
+ progress_info['last_message'] = result.get_child_content('last-message')
+ else:
+ progress_info['last_message'] = None
+ if result.get_child_by_name('run-status'):
+ progress_info['run_status'] = result.get_child_content('run-status')
+ else:
+ progress_info['run_status'] = None
+ return progress_info
+
+ def reboot_sp(self):
+ reboot = netapp_utils.zapi.NaElement('service-processor-reboot')
+ reboot.add_new_child('node', self.parameters['node'])
+ try:
+ self.server.invoke_successfully(reboot, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error rebooting service processor: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def download_sp_firmware(self):
+ if self.parameters.get('reboot_sp'):
+ self.reboot_sp()
+ self.download_sp_image()
+ progress = self.download_sp_image_progress()
+ # progress only show the current or most recent update/install operation.
+ if progress['phase'] == 'Download':
+ while progress['run_status'] is not None and progress['run_status'] != 'Exited':
+ time.sleep(10)
+ progress = self.download_sp_image_progress()
+ if progress['exit_status'] != 'Success':
+ self.module.fail_json(msg=progress['exit_message'], exception=traceback.format_exc())
+ return MSGS['dl_completed']
+ return MSGS['no_action']
+
+ def autosupport_log(self):
+ """
+ Autosupport log for software_update
+ :return:
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_firmware_upgrade", cserver)
+
    def apply(self):
        """
        Apply action to upgrade firmware.

        Flow:
        - when 'package_url' is set, download the package (SP packages via
          download_sp_firmware, everything else via download_firmware); unless
          'force_disruptive_update' is requested, exit right after the
          download since ONTAP applies disk/shelf/ACP firmware automatically.
        - otherwise (forced update), trigger the update matching
          'firmware_type' and, for the SP, poll until it completes.
        Always terminates through module.exit_json / fail_json.
        """
        changed = False
        msg = MSGS['no_action']
        self.autosupport_log()
        firmware_update_progress = dict()
        if self.parameters.get('package_url'):
            if not self.module.check_mode:
                if self.parameters.get('firmware_type') == 'service-processor':
                    msg = self.download_sp_firmware()
                else:
                    msg = self.download_firmware()
            # a download is always reported as a change, even in check mode
            changed = True
            if not self.parameters['force_disruptive_update']:
                # disk_qual, disk, shelf, and ACP are automatically updated in background
                # The SP firmware is automatically updated on reboot
                self.module.exit_json(changed=changed, msg=msg)
            if msg == MSGS['dl_in_progress']:
                # can't force an update if the software is still downloading
                self.module.fail_json(msg="Cannot force update: %s" % msg)
        if self.parameters.get('firmware_type') == 'service-processor':
            # service-processor firmware upgrade
            current = self.firmware_image_get(self.parameters['node'])

            if self.parameters.get('state') == 'present' and current:
                if not self.module.check_mode:
                    if self.sp_firmware_image_update():
                        changed = True
                    # poll every 25 seconds until the SP update finishes
                    firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
                    while firmware_update_progress.get('is-in-progress') == 'true':
                        time.sleep(25)
                        firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
                else:
                    # we don't know until we try the upgrade
                    changed = True

        elif self.parameters.get('firmware_type') == 'shelf':
            # shelf firmware upgrade
            if self.parameters.get('shelf_module_fw'):
                # only update when the installed revision differs from the target
                if self.shelf_firmware_required_get():
                    if not self.module.check_mode:
                        changed = self.shelf_firmware_upgrade()
                    else:
                        changed = True
            else:
                if not self.module.check_mode:
                    changed = self.shelf_firmware_upgrade()
                else:
                    # we don't know until we try the upgrade -- assuming the worst
                    changed = True
        elif self.parameters.get('firmware_type') == 'acp':
            # acp firmware upgrade
            if self.acp_firmware_required_get():
                if not self.module.check_mode:
                    self.acp_firmware_upgrade()
                changed = True
        elif self.parameters.get('firmware_type') == 'disk':
            # Disk firmware upgrade
            if self.parameters.get('disk_fw'):
                # only update when the installed revision differs from the target
                if self.disk_firmware_required_get():
                    if not self.module.check_mode:
                        changed = self.disk_firmware_upgrade()
                    else:
                        changed = True
            else:
                if not self.module.check_mode:
                    changed = self.disk_firmware_upgrade()
                else:
                    # we don't know until we try the upgrade -- assuming the worst
                    changed = True

        self.module.exit_json(changed=changed, msg='forced update for %s' % self.parameters.get('firmware_type'))
+
+
def main():
    """Instantiate the firmware upgrade module and run the requested action."""
    upgrade = NetAppONTAPFirmwareUpgrade()
    upgrade.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py
new file mode 100644
index 00000000..9dea364b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py
@@ -0,0 +1,470 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP FlexCache - create/delete relationship
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete FlexCache volume relationships
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_flexcache
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified relationship should exist or not.
+ default: present
+ type: str
+ origin_volume:
+ description:
+ - Name of the origin volume for the FlexCache.
+ - Required for creation.
+ type: str
+ origin_vserver:
+ description:
+ - Name of the origin vserver for the FlexCache.
+ - Required for creation.
+ type: str
+ origin_cluster:
+ description:
+ - Name of the origin cluster for the FlexCache.
+ - Defaults to cluster associated with target vserver if absent.
+ - Not used for creation.
+ type: str
+ volume:
+ description:
+ - Name of the target volume for the FlexCache.
+ required: true
+ type: str
+ junction_path:
+ description:
+ - Junction path of the cache volume.
+ type: str
+ auto_provision_as:
+ description:
+      - Use this parameter to automatically select existing aggregates for volume provisioning, e.g. flexgroup.
+ - Note that the fastest aggregate type with at least one aggregate on each node of the cluster will be selected.
+ type: str
+ size:
+ description:
+ - Size of cache volume.
+ type: int
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: gb
+ vserver:
+ description:
+ - Name of the target vserver for the FlexCache.
+ - Note that hostname, username, password are intended for the target vserver.
+ required: true
+ type: str
+ aggr_list:
+ description:
+ - List of aggregates to host target FlexCache volume.
+ type: list
+ elements: str
+ aggr_list_multiplier:
+ description:
+ - Aggregate list repeat count.
+ type: int
+ force_unmount:
+ description:
+ - Unmount FlexCache volume. Delete the junction path at which the volume is mounted before deleting the FlexCache relationship.
+ type: bool
+ default: false
+ force_offline:
+ description:
+ - Offline FlexCache volume before deleting the FlexCache relationship.
+ - The volume will be destroyed and data can be lost.
+ type: bool
+ default: false
+ time_out:
+ description:
+ - time to wait for flexcache creation or deletion in seconds
+ - if 0, the request is asynchronous
+ - default is set to 3 minutes
+ type: int
+ default: 180
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexCache
+ na_ontap_FlexCache:
+ state: present
+ origin_volume: test_src
+ volume: test_dest
+ origin_vserver: ansible_src
+ vserver: ansible_dest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete FlexCache
+ na_ontap_FlexCache:
+ state: absent
+ volume: test_dest
+ vserver: ansible_dest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPFlexCache(object):
    """
    Create or delete a FlexCache volume relationship through ZAPI.

    Create/delete are asynchronous jobs; when 'time_out' > 0 the module
    polls the job until completion.
    """

    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'],
                       default='present'),
            origin_volume=dict(required=False, type='str'),
            origin_vserver=dict(required=False, type='str'),
            origin_cluster=dict(required=False, type='str'),
            auto_provision_as=dict(required=False, type='str'),
            volume=dict(required=True, type='str'),
            junction_path=dict(required=False, type='str'),
            size=dict(required=False, type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            vserver=dict(required=True, type='str'),
            aggr_list=dict(required=False, type='list', elements='str'),
            aggr_list_multiplier=dict(required=False, type='int'),
            force_offline=dict(required=False, type='bool', default=False),
            force_unmount=dict(required=False, type='bool', default=False),
            time_out=dict(required=False, type='int', default=180),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ('aggr_list', 'auto_provision_as'),
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if self.parameters.get('size'):
            # convert size to bytes using the requested unit
            self.parameters['size'] = self.parameters['size'] * \
                netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
        # setup later if required
        self.origin_server = None
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def add_parameter_to_dict(self, adict, name, key=None, tostr=False):
        ''' add defined parameter (not None) to adict using key

        :param adict: target dictionary, updated in place.
        :param name: parameter name in self.parameters.
        :param key: key to use in adict; defaults to name.
        :param tostr: when True, store str(value) (ZAPI expects strings).
        '''
        if key is None:
            key = name
        if self.parameters.get(name) is not None:
            if tostr:
                adict[key] = str(self.parameters.get(name))
            else:
                adict[key] = self.parameters.get(name)

    def get_job(self, jobid, server):
        """
        Get job details by id.

        :return: dict with 'job-progress', 'job-state', 'job-completion'
            (None when absent), or None when the job does not exist.
        """
        job_get = netapp_utils.zapi.NaElement('job-get')
        job_get.add_new_child('job-id', jobid)
        try:
            result = server.invoke_successfully(job_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "15661":
                # Not found
                return None
            self.module.fail_json(msg='Error fetching job info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
        results = {
            'job-progress': job_info['job-progress'],
            'job-state': job_info['job-state']
        }
        if job_info.get_child_by_name('job-completion') is not None:
            results['job-completion'] = job_info['job-completion']
        else:
            results['job-completion'] = None
        return results

    def check_job_status(self, jobid):
        """
        Poll a job until it completes or 'time_out' seconds have elapsed.

        :return: None on success, else an error message.
        """
        server = self.server
        sleep_time = 5
        time_out = self.parameters['time_out']
        # initialized so a non-positive time_out cannot raise NameError below
        results = None
        error = None
        while time_out > 0:
            results = self.get_job(jobid, server)
            # If running as cluster admin, the job is owned by cluster vserver
            # rather than the target vserver: retry against the cluster vserver.
            if results is None and server == self.server:
                cserver = netapp_utils.get_cserver(self.server)
                server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=cserver)
                continue
            if results is None:
                error = 'cannot locate job with id: %d' % jobid
                break
            if results['job-state'] in ('queued', 'running'):
                time.sleep(sleep_time)
                time_out -= sleep_time
                continue
            if results['job-state'] in ('success', 'failure'):
                break
            else:
                self.module.fail_json(msg='Unexpected job status in: %s' % repr(results))

        if results is not None:
            if results['job-state'] == 'success':
                error = None
            elif results['job-state'] in ('queued', 'running'):
                error = 'job completion exceeded expected timer of: %s seconds' % \
                        self.parameters['time_out']
            else:
                if results['job-completion'] is not None:
                    error = results['job-completion']
                else:
                    error = results['job-progress']
        return error

    def flexcache_get_iter(self):
        """
        Compose NaElement object to query the current FlexCache relation.
        """
        options = {'volume': self.parameters['volume']}
        self.add_parameter_to_dict(options, 'origin_volume', 'origin-volume')
        self.add_parameter_to_dict(options, 'origin_vserver', 'origin-vserver')
        self.add_parameter_to_dict(options, 'origin_cluster', 'origin-cluster')
        flexcache_info = netapp_utils.zapi.NaElement.create_node_with_children(
            'flexcache-info', **options)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(flexcache_info)
        flexcache_get_iter = netapp_utils.zapi.NaElement('flexcache-get-iter')
        flexcache_get_iter.add_child_elem(query)
        return flexcache_get_iter

    def flexcache_get(self):
        """
        Get the current FlexCache relation.

        :return: dict of FlexCache details when exactly one record matches,
            else None.  Fails the module when multiple records match.
        """
        flexcache_get_iter = self.flexcache_get_iter()
        flex_info = dict()
        try:
            result = self.server.invoke_successfully(flexcache_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching FlexCache info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            flexcache_info = result.get_child_by_name('attributes-list') \
                                   .get_child_by_name('flexcache-info')
            flex_info['origin_cluster'] = flexcache_info.get_child_content('origin-cluster')
            flex_info['origin_volume'] = flexcache_info.get_child_content('origin-volume')
            flex_info['origin_vserver'] = flexcache_info.get_child_content('origin-vserver')
            flex_info['size'] = flexcache_info.get_child_content('size')
            flex_info['volume'] = flexcache_info.get_child_content('volume')
            flex_info['vserver'] = flexcache_info.get_child_content('vserver')
            flex_info['auto_provision_as'] = flexcache_info.get_child_content('auto-provision-as')

            return flex_info
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 1:
            msg = 'Multiple records found for %s:' % self.parameters['volume']
            self.module.fail_json(msg='Error fetching FlexCache info: %s' % msg)
        return None

    def flexcache_create_async(self):
        """
        Start a FlexCache create job.

        :return: dict possibly containing 'result-status' and 'result-jobid'.
        """
        options = {'origin-volume': self.parameters['origin_volume'],
                   'origin-vserver': self.parameters['origin_vserver'],
                   'volume': self.parameters['volume']}
        self.add_parameter_to_dict(options, 'junction_path', 'junction-path')
        self.add_parameter_to_dict(options, 'auto_provision_as', 'auto-provision-as')
        self.add_parameter_to_dict(options, 'size', 'size', tostr=True)
        if self.parameters.get('aggr_list') and self.parameters.get('aggr_list_multiplier'):
            # BUGFIX: this used bytes(multiplier) (a zero-filled buffer in py3)
            # and stored it on self instead of in options, so the multiplier
            # was never sent to ONTAP.  ZAPI expects the value as a string.
            self.add_parameter_to_dict(options, 'aggr_list_multiplier', 'aggr-list-multiplier', tostr=True)
        flexcache_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'flexcache-create-async', **options)
        if self.parameters.get('aggr_list'):
            aggregates = netapp_utils.zapi.NaElement('aggr-list')
            for aggregate in self.parameters['aggr_list']:
                aggregates.add_new_child('aggr-name', aggregate)
            flexcache_create.add_child_elem(aggregates)
        try:
            result = self.server.invoke_successfully(flexcache_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating FlexCache %s' % to_native(error),
                                  exception=traceback.format_exc())
        results = dict()
        for key in ('result-status', 'result-jobid'):
            if result.get_child_by_name(key):
                results[key] = result[key]
        return results

    def flexcache_create(self):
        """
        Create a FlexCache relationship and, unless time_out is 0, wait for
        the create job to complete.
        """
        results = self.flexcache_create_async()
        status = results.get('result-status')
        if status == 'in_progress' and 'result-jobid' in results:
            if self.parameters['time_out'] == 0:
                # asynchronous call, assuming success!
                return
            error = self.check_job_status(results['result-jobid'])
            if error is None:
                return
            else:
                self.module.fail_json(msg='Error when creating flexcache: %s' % error)
        self.module.fail_json(msg='Unexpected error when creating flexcache: results is: %s' % repr(results))

    def flexcache_delete_async(self):
        """
        Start a FlexCache destroy job at the destination cluster.

        :return: dict possibly containing 'result-status' and 'result-jobid'.
        """
        options = {'volume': self.parameters['volume']}
        flexcache_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'flexcache-destroy-async', **options)
        try:
            result = self.server.invoke_successfully(flexcache_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting FlexCache : %s'
                                      % (to_native(error)),
                                  exception=traceback.format_exc())
        results = dict()
        for key in ('result-status', 'result-jobid'):
            if result.get_child_by_name(key):
                results[key] = result[key]
        return results

    def volume_offline(self):
        """
        Offline the FlexCache volume at the destination cluster.
        """
        options = {'name': self.parameters['volume']}
        xml = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-offline', **options)
        try:
            self.server.invoke_successfully(xml, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error offlining FlexCache volume: %s'
                                      % (to_native(error)),
                                  exception=traceback.format_exc())

    def volume_unmount(self):
        """
        Unmount the FlexCache volume at the destination cluster.
        """
        options = {'volume-name': self.parameters['volume']}
        xml = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-unmount', **options)
        try:
            self.server.invoke_successfully(xml, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error unmounting FlexCache volume: %s'
                                      % (to_native(error)),
                                  exception=traceback.format_exc())

    def flexcache_delete(self):
        """
        Delete the FlexCache relationship (optionally unmounting/offlining
        the volume first) and, unless time_out is 0, wait for the job.
        """
        if self.parameters['force_unmount']:
            self.volume_unmount()
        if self.parameters['force_offline']:
            self.volume_offline()
        results = self.flexcache_delete_async()
        status = results.get('result-status')
        if status == 'in_progress' and 'result-jobid' in results:
            if self.parameters['time_out'] == 0:
                # asynchronous call, assuming success!
                return
            error = self.check_job_status(results['result-jobid'])
            if error is None:
                return
            else:
                self.module.fail_json(msg='Error when deleting flexcache: %s' % error)
        self.module.fail_json(msg='Unexpected error when deleting flexcache: results is: %s' % repr(results))

    def check_parameters(self):
        """
        Validate parameters and fail if one or more required params are missing.
        """
        missings = list()
        expected = ('origin_volume', 'origin_vserver')
        if self.parameters['state'] == 'present':
            for param in expected:
                if not self.parameters.get(param):
                    missings.append(param)
        if missings:
            plural = 's' if len(missings) > 1 else ''
            msg = 'Missing parameter%s: %s' % (plural, ', '.join(missings))
            self.module.fail_json(msg=msg)

    def apply(self):
        """
        Apply the requested create/delete action to the FlexCache.
        """
        netapp_utils.ems_log_event("na_ontap_flexcache", self.server)
        current = self.flexcache_get()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if not self.module.check_mode:
            if cd_action == 'create':
                self.check_parameters()
                self.flexcache_create()
            elif cd_action == 'delete':
                self.flexcache_delete()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Instantiate the FlexCache module and run the requested action."""
    flexcache = NetAppONTAPFlexCache()
    flexcache.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py
new file mode 100644
index 00000000..97c6be5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+''' this is igroup module
+
+ (c) 2018-2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_igroup
+short_description: NetApp ONTAP iSCSI or FC igroup configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create/Delete/Rename Igroups and Modify initiators belonging to an igroup
+
+options:
+ state:
+ description:
+ - Whether the specified Igroup should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the igroup to manage.
+ required: true
+ type: str
+
+ initiator_group_type:
+ description:
+ - Type of the initiator group.
+ - Required when C(state=present).
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+
+ from_name:
+ description:
+ - Name of igroup to rename to name.
+ version_added: 2.7.0
+ type: str
+
+ os_type:
+ description:
+ - OS type of the initiators within the group.
+ type: str
+ aliases: ['ostype']
+
+ initiators:
+ description:
+ - List of initiators to be mapped to the igroup.
+ - WWPN, WWPN Alias, or iSCSI name of Initiator to add or remove.
+    - For a modify operation, this list replaces the existing initiators
+ - This module does not add or remove specific initiator(s) in an igroup
+ aliases:
+ - initiator
+ type: list
+ elements: str
+
+ bind_portset:
+ description:
+ - Name of a current portset to bind to the newly created igroup.
+ type: str
+
+ force_remove_initiator:
+ description:
+ - Forcibly remove the initiator even if there are existing LUNs mapped to this initiator group.
+ type: bool
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create iSCSI Igroup
+ na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiators: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com,abc.com:redhat.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create FC Igroup
+ na_ontap_igroup:
+ state: present
+ name: ansibleIgroup4
+ initiator_group_type: fcp
+ os_type: linux
+ initiators: 20:00:00:50:56:9f:19:82
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: rename Igroup
+ na_ontap_igroup:
+ state: present
+ from_name: ansibleIgroup3
+ name: testexamplenewname
+ initiator_group_type: iscsi
+ os_type: linux
+ initiators: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+  - name: Modify Igroup Initiators (replaces existing initiators)
+ na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiator: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete Igroup
+ na_ontap_igroup:
+ state: absent
+ name: ansibleIgroup3
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIgroup(object):
+ """Create/Delete/Rename Igroups and Modify initiators list"""
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str', default=None),
+ os_type=dict(required=False, type='str', aliases=['ostype']),
+ initiator_group_type=dict(required=False, type='str',
+ choices=['fcp', 'iscsi', 'mixed']),
+ initiators=dict(required=False, type='list', elements='str', aliases=['initiator']),
+ vserver=dict(required=True, type='str'),
+ force_remove_initiator=dict(required=False, type='bool', default=False),
+ bind_portset=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.module.params.get('initiators') is not None:
+ self.parameters['initiators'] = [self.na_helper.sanitize_wwn(initiator)
+ for initiator in self.module.params['initiators']]
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_igroup(self, name):
+ """
+ Return details about the igroup
+ :param:
+ name : Name of the igroup
+
+ :return: Details about the igroup. None if not found.
+ :rtype: dict
+ """
+ igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
+ attributes = dict(query={'initiator-group-info': {'initiator-group-name': name,
+ 'vserver': self.parameters['vserver']}})
+ igroup_info.translate_struct(attributes)
+ result, current = None, None
+
+ try:
+ result = self.server.invoke_successfully(igroup_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ igroup = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
+ initiators = []
+ if igroup.get_child_by_name('initiators'):
+ current_initiators = igroup['initiators'].get_children()
+ for initiator in current_initiators:
+ initiators.append(initiator['initiator-name'])
+ current = {
+ 'initiators': initiators
+ }
+
+ return current
+
+ def add_initiators(self):
+ """
+ Add the list of initiators to igroup
+ :return: None
+ """
+ # don't add if initiators is empty string
+ if self.parameters.get('initiators') == [''] or self.parameters.get('initiators') is None:
+ return
+ for initiator in self.parameters['initiators']:
+ self.modify_initiator(initiator, 'igroup-add')
+
+ def remove_initiators(self, initiators):
+ """
+ Removes all existing initiators from igroup
+ :return: None
+ """
+ for initiator in initiators:
+ self.modify_initiator(initiator, 'igroup-remove')
+
+ def modify_initiator(self, initiator, zapi):
+ """
+ Add or remove an initiator to/from an igroup
+ """
+ options = {'initiator-group-name': self.parameters['name'],
+ 'initiator': initiator}
+
+ igroup_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+ try:
+ self.server.invoke_successfully(igroup_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_igroup(self):
+ """
+ Create the igroup.
+ """
+ options = {'initiator-group-name': self.parameters['name']}
+ if self.parameters.get('os_type') is not None:
+ options['os-type'] = self.parameters['os_type']
+ if self.parameters.get('initiator_group_type') is not None:
+ options['initiator-group-type'] = self.parameters['initiator_group_type']
+ if self.parameters.get('bind_portset') is not None:
+ options['bind-portset'] = self.parameters['bind_portset']
+
+ igroup_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'igroup-create', **options)
+
+ try:
+ self.server.invoke_successfully(igroup_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error provisioning igroup %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ self.add_initiators()
+
+ def delete_igroup(self):
+ """
+ Delete the igroup.
+ """
+ igroup_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'igroup-destroy', **{'initiator-group-name': self.parameters['name'],
+ 'force': 'true' if self.parameters['force_remove_initiator'] else 'false'})
+
+ try:
+ self.server.invoke_successfully(igroup_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting igroup %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_igroup(self):
+ """
+ Rename the igroup.
+ """
+ igroup_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'igroup-rename', **{'initiator-group-name': self.parameters['from_name'],
+ 'initiator-group-new-name': str(self.parameters['name'])})
+ try:
+ self.server.invoke_successfully(igroup_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming igroup %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ netapp_utils.ems_log_event("na_ontap_igroup", self.server)
+
+ def apply(self):
+ self.autosupport_log()
+ current = self.get_igroup(self.parameters['name'])
+ # rename and create are mutually exclusive
+ rename, cd_action, modify = None, None, None
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_igroup(self.parameters['from_name']), current)
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_igroup()
+ elif cd_action == 'create':
+ self.create_igroup()
+ elif cd_action == 'delete':
+ self.delete_igroup()
+ if modify:
+ self.remove_initiators(current['initiators'])
+ self.add_initiators()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapIgroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py
new file mode 100644
index 00000000..0525cae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+''' This is an Ansible module for ONTAP, to manage initiators in an Igroup
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_igroup_initiator
+short_description: NetApp ONTAP igroup initiator configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add/Remove initiators from an igroup
+
+options:
+ state:
+ description:
+ - Whether the specified initiator should exist or not in an igroup.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ names:
+ description:
+ - List of initiators to manage.
+ required: true
+ aliases:
+ - name
+ type: list
+ elements: str
+
+ initiator_group:
+ description:
+ - Name of the initiator group to which the initiator belongs.
+ required: true
+ type: str
+
+ force_remove:
+ description:
+ - Forcibly remove the initiators even if there are existing LUNs mapped to the initiator group.
+ type: bool
+ default: false
+ version_added: '20.1.0'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Add initiators to an igroup
+ na_ontap_igroup_initiator:
+ names: abc.test:def.com,def.test:efg.com
+ initiator_group: test_group
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Remove an initiator from an igroup
+ na_ontap_igroup_initiator:
+ state: absent
+ names: abc.test:def.com
+ initiator_group: test_group
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIgroupInitiator(object):
+    """Add or remove individual initiators in an existing ONTAP igroup."""
+
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            names=dict(required=True, type='list', elements='str', aliases=['name']),
+            initiator_group=dict(required=True, type='str'),
+            force_remove=dict(required=False, type='bool', default=False),
+            vserver=dict(required=True, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_initiators(self):
+        """
+        Get the existing list of initiators from an igroup
+        :rtype: list() or None
+        """
+        igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
+        attributes = dict(query={'initiator-group-info': {'initiator-group-name': self.parameters['initiator_group'],
+                                                          'vserver': self.parameters['vserver']}})
+        igroup_info.translate_struct(attributes)
+        result, current = None, []
+
+        try:
+            result = self.server.invoke_successfully(igroup_info, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['initiator_group'],
+                                                                             to_native(error)),
+                                  exception=traceback.format_exc())
+
+        # num-records >= 1 means the igroup was found; it may still have no
+        # 'initiators' child, in which case the empty list is returned
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            igroup_info = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
+            if igroup_info.get_child_by_name('initiators') is not None:
+                current = [initiator['initiator-name'] for initiator in igroup_info['initiators'].get_children()]
+        return current
+
+    def modify_initiator(self, initiator_name, zapi):
+        """
+        Add or remove an initiator to/from an igroup
+        :param initiator_name: WWPN, WWPN alias, or iSCSI name of the initiator
+        :param zapi: 'igroup-add' or 'igroup-remove'
+        """
+        # 'force' is only sent as 'true' on igroup-remove when force_remove is set
+        options = {'initiator-group-name': self.parameters['initiator_group'],
+                   'initiator': initiator_name,
+                   'force': 'true' if zapi == 'igroup-remove' and self.parameters['force_remove'] else 'false'}
+        initiator_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+        try:
+            self.server.invoke_successfully(initiator_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (initiator_name,
+                                                                                   to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        """Send an EMS autosupport event for this module."""
+        netapp_utils.ems_log_event("na_ontap_igroup_initiator", self.server)
+
+    def apply(self):
+        """Add or remove each requested initiator to reach the desired state."""
+        self.autosupport_log()
+        initiators = self.get_initiators()
+        for initiator in self.parameters['names']:
+            present = None
+            # normalize WWPNs before comparing with ONTAP returned values
+            initiator = self.na_helper.sanitize_wwn(initiator)
+            if initiator in initiators:
+                present = True
+            cd_action = self.na_helper.get_cd_action(present, self.parameters)
+            if self.na_helper.changed:
+                if self.module.check_mode:
+                    pass
+                else:
+                    if cd_action == 'create':
+                        self.modify_initiator(initiator, 'igroup-add')
+                    elif cd_action == 'delete':
+                        self.modify_initiator(initiator, 'igroup-remove')
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapIgroupInitiator()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py
new file mode 100644
index 00000000..8336409d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py
@@ -0,0 +1,1787 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_info
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_info
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: NetApp information gatherer
+description:
+ - This module allows you to gather various information about ONTAP configuration
+version_added: 2.9.0
+requirements:
+ - netapp_lib
+options:
+ state:
+ type: str
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ vserver:
+ type: str
+ description:
+ - If present, 'vserver tunneling' will limit the output to the vserver scope.
+ - Note that not all subsets are supported on a vserver, and 'all' will trigger an error.
+ version_added: '19.11.0'
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected
+ to a given subset. Possible values for this argument include
+ "aggregate_info",
+ "aggr_efficiency_info",
+ "cifs_options_info",
+ "cifs_server_info",
+ "cifs_share_info",
+ "cifs_vserver_security_info",
+ "cluster_identity_info",
+ "cluster_image_info",
+ "cluster_log_forwarding_info",
+ "cluster_node_info",
+ "cluster_peer_info",
+ "cluster_switch_info",
+ "clock_info",
+ "disk_info",
+ "env_sensors_info",
+ "event_notification_destination_info",
+ "event_notification_info",
+ "export_policy_info",
+ "export_rule_info",
+ "fcp_adapter_info",
+ "fcp_alias_info",
+ "fcp_service_info",
+ "igroup_info",
+ "iscsi_service_info",
+ "job_schedule_cron_info",
+ "kerberos_realm_info",
+ "ldap_client",
+ "ldap_config",
+ "license_info",
+ "lun_info",
+ "lun_map_info",
+ "metrocluster_check_info",
+ "metrocluster_info",
+ "metrocluster_node_info",
+ "net_dev_discovery_info",
+ "net_dns_info",
+ "net_failover_group_info",
+ "net_firewall_info",
+ "net_ifgrp_info",
+ "net_interface_info",
+ "net_interface_service_policy_info",
+ "net_ipspaces_info",
+ "net_port_info",
+ "net_port_broadcast_domain_info",
+ "net_routes_info",
+ "net_vlan_info",
+ "nfs_info",
+ "ntfs_dacl_info",
+ "ntfs_sd_info",
+ "ntp_server_info",
+ "nvme_info",
+ "nvme_interface_info",
+ "nvme_namespace_info",
+ "nvme_subsystem_info",
+ "ontap_system_version",
+ "ontap_version",
+ "ontapi_version",
+ "qos_adaptive_policy_info",
+ "qos_policy_info",
+ "qtree_info",
+ "quota_report_info",
+ "role_info",
+ "security_key_manager_key_info",
+ "security_login_account_info",
+ "security_login_role_config_info",
+ "security_login_role_info",
+ "service_processor_info",
+ "service_processor_network_info",
+ "shelf_info"
+ "sis_info",
+ "sis_policy_info",
+ "snapmirror_info",
+ "snapmirror_destination_info",
+ "snapmirror_policy_info",
+ "snapshot_info",
+ "snapshot_policy_info",
+ "storage_failover_info",
+ "storage_bridge_info",
+ "subsys_health_info",
+ "sysconfig_info",
+ "sys_cluster_alerts",
+ "volume_info",
+ "volume_space_info",
+ "vscan_info",
+ "vscan_status_info",
+ "vscan_scanner_pool_info",
+ "vscan_connection_status_all_info",
+ "vscan_connection_extended_stats_info",
+ "vserver_info",
+ "vserver_login_banner_info",
+ "vserver_motd_info",
+ "vserver_nfs_info",
+ "vserver_peer_info",
+ Can specify a list of values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+ not be collected.
+ - nvme is supported with ONTAP 9.4 onwards.
+ - use "help" to get a list of supported information for your system.
+ default: "all"
+ max_records:
+ type: int
+ description:
+ - Maximum number of records returned in a single ZAPI call. Valid range is [1..2^32-1].
+ This parameter controls internal behavior of this module.
+ default: 1024
+ version_added: '20.2.0'
+ summary:
+ description:
+ - Boolean flag to control return all attributes of the module info or only the names.
+ - If true, only names are returned.
+ default: false
+ type: bool
+ version_added: '20.4.0'
+ volume_move_target_aggr_info:
+ description:
+ - Required options for volume_move_target_aggr_info
+ type: dict
+ version_added: '20.5.0'
+ suboptions:
+ volume_name:
+ description:
+ - Volume name to get target aggr info for
+ required: true
+ type: str
+ version_added: '20.5.0'
+ vserver:
+ description:
+ - vserver the Volume lives on
+ required: true
+ type: str
+ version_added: '20.5.0'
+ desired_attributes:
+ description:
+ - Advanced feature requiring to understand ZAPI internals.
+ - Allows to request a specific attribute that is not returned by default, or to limit the returned attributes.
+ - A dictionary for the zapi desired-attributes element.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - Only a single subset can be called at a time if this option is set.
+      - It is the caller responsibility to make sure key attributes are present in the right position.
+ - The module will error out if any key attribute is missing.
+ type: dict
+ version_added: '20.6.0'
+ query:
+ description:
+ - Advanced feature requiring to understand ZAPI internals.
+ - Allows to specify which objects to return.
+ - A dictionary for the zapi query element.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - Only a single subset can be called at a time if this option is set.
+ type: dict
+ version_added: '20.7.0'
+ use_native_zapi_tags:
+ description:
+ - By default, I(-) in the returned dictionary keys are translated to I(_).
+ - If set to true, the translation is disabled.
+ type: bool
+ default: false
+ version_added: '20.6.0'
+ continue_on_error:
+ description:
+ - By default, this module fails on the first error.
+ - This option allows to provide a list of errors that are not failing the module.
+ - Errors in the list are reported in the output, under the related info element, as an "error" entry.
+ - Possible values are always, never, missing_vserver_api_error, rpc_error, other_error.
+ - missing_vserver_api_error - most likely the API is available at cluster level but not vserver level.
+ - rpc_error - some queries are failing because the node cannot reach another node in the cluster.
+ - key_error - a query is failing because the returned data does not contain an expected key.
+ - for key errors, make sure to report this in Slack. It may be a change in a new ONTAP version.
+ - other_error - anything not in the above list.
+ - always will continue on any error, never will fail on any error, they cannot be used with any other keyword.
+ type: list
+ elements: str
+ default: never
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info as Cluster Admin (Password Authentication)
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ register: ontap_info
+- debug:
+ msg: "{{ ontap_info.ontap_info }}"
+
+- name: Get NetApp version as Vserver admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "vsadmin"
+ vserver: trident_svm
+ password: "vsadmins_password"
+
+- name: run ontap info module using vserver tunneling and ignoring errors
+ na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ vserver: trident_svm
+ summary: true
+ continue_on_error:
+ - missing_vserver_api_error
+ - rpc_error
+
+- name: Limit Info Gathering to Aggregate Information as Cluster Admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+ register: ontap_info
+
+- name: Limit Info Gathering to Volume and Lun Information as Cluster Admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+ register: ontap_info
+
+- name: Gather all info except for volume and lun information as Cluster Admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+ register: ontap_info
+
+- name: Gather Volume move information for a specific volume
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: volume_move_target_aggr_info
+ volume_move_target_aggr_info:
+ volume_name: carchitest
+ vserver: ansible
+
+- name: run ontap info module for aggregate module, requesting specific fields
+ na_ontap_info:
+ # <<: *login
+ gather_subset: aggregate_info
+ desired_attributes:
+ aggr-attributes:
+ aggr-inode-attributes:
+ files-private-used:
+ aggr-raid-attributes:
+ aggregate-type:
+ use_native_zapi_tags: true
+ register: ontap
+- debug: var=ontap
+
+- name: run ontap info to get offline volumes with dp in the name
+ na_ontap_info:
+ # <<: *cert_login
+ gather_subset: volume_info
+ query:
+ volume-attributes:
+ volume-id-attributes:
+ name: '*dp*'
+ volume-state-attributes:
+ state: offline
+ desired_attributes:
+ volume-attributes:
+ volume-id-attributes:
+ name:
+ volume-state-attributes:
+ state:
+ register: ontap
+- debug: var=ontap
+'''
+
+RETURN = '''
+ontap_info:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_info": {
+ "aggregate_info": {...},
+ "cluster_identity_info": {...},
+ "cluster_image_info": {...},
+ "cluster_node_info": {...},
+ "igroup_info": {...},
+ "iscsi_service_info": {...},
+ "license_info": {...},
+ "lun_info": {...},
+ "metrocluster_check_info": {...},
+ "metrocluster_info": {...},
+ "metrocluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_interface_service_policy_info": {...},
+ "net_port_info": {...},
+ "ontap_system_version": {...},
+ "ontap_version": {...},
+ "ontapi_version": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...},
+ "qtree_info": {...},
+ "quota_report_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+ "snapmirror_info": {...}
+ "snapmirror_destination_info": {...}
+ "storage_bridge_info": {...}
+ "storage_failover_info": {...},
+ "volume_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "vscan_status_info": {...},
+ "vscan_scanner_pool_info": {...},
+ "vscan_connection_status_all_info": {...},
+ "vscan_connection_extended_stats_info": {...}
+ }'
+'''
+
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherInfo(object):
+ '''Class with gather info methods'''
+
+ def __init__(self, module, max_records):
+ self.module = module
+ self.max_records = str(max_records)
+ volume_move_target_aggr_info = module.params.get('volume_move_target_aggr_info', dict())
+ if volume_move_target_aggr_info is None:
+ volume_move_target_aggr_info = dict()
+ self.netapp_info = dict()
+ self.desired_attributes = module.params['desired_attributes']
+ self.query = module.params['query']
+ self.translate_keys = not module.params['use_native_zapi_tags']
+ self.warnings = list() # warnings will be added to the info results, if any
+ self.set_error_flags()
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.info_subsets = {
+ 'cluster_identity_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-identity-get',
+ 'attributes_list_tag': 'attributes',
+ 'attribute': 'cluster-identity-info',
+ 'key_fields': 'cluster-name',
+ },
+ 'min_version': '0',
+ },
+ 'cluster_image_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-image-get-iter',
+ 'attribute': 'cluster-image-info',
+ 'key_fields': 'node-id',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_log_forwarding_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-log-forward-get-iter',
+ 'attribute': 'cluster-log-forward-info',
+ 'key_fields': ('destination', 'port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'key_fields': 'node-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'key_fields': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_role_config_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-config-get-iter',
+ 'attribute': 'security-login-role-config-info',
+ 'key_fields': ('vserver', 'role-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_role_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-get-iter',
+ 'attribute': 'security-login-role-info',
+ 'key_fields': ('vserver', 'role-name', 'command-directory-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'key_fields': 'aggregate-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'key_fields': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'license_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'license-v2-list-info',
+ 'attributes_list_tag': None,
+ 'attribute': 'licenses',
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'key_fields': ('vserver', 'path'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_check_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-check-get-iter',
+ 'attribute': 'metrocluster-check-info',
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-get',
+ 'attribute': 'metrocluster-info',
+ 'attributes_list_tag': 'attributes',
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-node-get-iter',
+ 'attribute': 'metrocluster-node-info',
+ 'key_fields': ('cluster-name', 'node-name'),
+ },
+ 'min_version': '0',
+ },
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'key_fields': 'vserver-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'key_fields': 'interface-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_service_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-service-policy-get-iter',
+ 'attribute': 'net-interface-service-policy-info',
+ 'key_fields': ('vserver', 'policy'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '150',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'key_fields': ('node', 'port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'key_fields': ('node', 'key-id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'key_fields': 'node',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'key_fields': 'vserver-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_system_version': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-get-version',
+ 'attributes_list_tag': None,
+ },
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontapi_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'clock_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'clock-get-clock',
+ 'attributes_list_tag': None,
+ },
+ 'min_version': '0'
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'key_fields': 'node',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'key_fields': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'iscsi_service_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'iscsi-service-get-iter',
+ 'attribute': 'iscsi-service-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'key_fields': 'policy-group',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'qtree_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qtree-list-iter',
+ 'attribute': 'qtree-info',
+ 'key_fields': ('vserver', 'volume', 'id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'quota_report_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'quota-report-iter',
+ 'attribute': 'quota',
+ 'key_fields': ('vserver', 'volume', 'tree', 'quota-type', 'quota-target'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_status_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-status-get-iter',
+ 'attribute': 'vscan-status-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_scanner_pool_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-scanner-pool-get-iter',
+ 'attribute': 'vscan-scanner-pool-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_connection_status_all_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-connection-status-all-get-iter',
+ 'attribute': 'vscan-connection-status-all-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_connection_extended_stats_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-connection-extended-stats-get-iter',
+ 'attribute': 'vscan-connection-extended-stats-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapshot_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapshot-get-iter',
+ 'attribute': 'snapshot-info',
+ 'key_fields': ('vserver', 'volume', 'name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'storage_bridge_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-bridge-get-iter',
+ 'attribute': 'storage-bridge-info',
+ 'key_fields': 'name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'key_fields': 'policy-group',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'key_fields': 'subsystem',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'key_fields': 'path',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+
+ # Alpha Order
+
+ 'aggr_efficiency_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-efficiency-get-iter',
+ 'attribute': 'aggr-efficiency-info',
+ 'key_fields': ('node', 'aggregate'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'cifs_options_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-options-get-iter',
+ 'attribute': 'cifs-options',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_server_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-server-get-iter',
+ 'attribute': 'cifs-server-config',
+ # preferred key is <vserver>:<domain>:<cifs-server>
+ # alternate key is <vserver>:<domain-workgroup>:<cifs-server>
+ 'key_fields': ('vserver', ('domain', 'domain-workgroup'), 'cifs-server'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_share_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-share-get-iter',
+ 'attribute': 'cifs-share',
+ 'key_fields': ('share-name', 'path', 'cifs-server'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_vserver_security_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-security-get-iter',
+ 'attribute': 'cifs-security',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_peer_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-peer-get-iter',
+ 'attribute': 'cluster-peer-info',
+ 'key_fields': ('cluster-name', 'remote-cluster-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_switch_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-switch-get-iter',
+ 'attribute': 'cluster-switch-info',
+ 'key_fields': ('device', 'model', 'serial-number'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '160',
+ },
+ 'disk_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-disk-get-iter',
+ 'attribute': 'storage-disk-info',
+ 'key_fields': ('disk-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'env_sensors_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'environment-sensors-get-iter',
+ 'attribute': 'environment-sensors-info',
+ 'key_fields': ('node-name', 'sensor-name'),
+ 'query': {'max-records': self.max_records},
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'event_notification_destination_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ems-event-notification-destination-get-iter',
+ 'attribute': 'event-notification-destination-info',
+ 'key_fields': ('name', 'type'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'event_notification_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ems-event-notification-get-iter',
+ 'attribute': 'event-notification',
+ 'key_fields': ('id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'export_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'export-policy-get-iter',
+ 'attribute': 'export-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'export_rule_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'export-rule-get-iter',
+ 'attribute': 'export-rule-info',
+ 'key_fields': ('vserver-name', 'policy-name', 'rule-index'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_adapter_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ucm-adapter-get-iter',
+ 'attribute': 'uc-adapter-info',
+ 'key_fields': ('adapter-name', 'node-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_alias_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'fcp-wwpnalias-get-iter',
+ 'attribute': 'aliases-info',
+ 'key_fields': ('aliases-alias', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_service_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'fcp-service-get-iter',
+ 'attribute': 'fcp-service-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'job_schedule_cron_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'job-schedule-cron-get-iter',
+ 'attribute': 'job-schedule-cron-info',
+ 'key_fields': ('job-schedule-name', 'job-schedule-cluster'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'kerberos_realm_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'kerberos-realm-get-iter',
+ 'attribute': 'kerberos-realm',
+ 'key_fields': ('vserver-name', 'realm'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ldap_client': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ldap-client-get-iter',
+ 'attribute': 'ldap-client',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ldap_config': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ldap-config-get-iter',
+ 'attribute': 'ldap-config',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'lun_map_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-map-get-iter',
+ 'attribute': 'lun-map-info',
+ 'key_fields': ('initiator-group', 'lun-id', 'node', 'path', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_dev_discovery_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-device-discovery-get-iter',
+ 'attribute': 'net-device-discovery-info',
+ 'key_fields': ('port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_failover_group_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-failover-group-get-iter',
+ 'attribute': 'net-failover-group-info',
+ 'key_fields': ('vserver', 'failover-group'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_firewall_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-firewall-policy-get-iter',
+ 'attribute': 'net-firewall-policy-info',
+ 'key_fields': ('policy', 'vserver', 'service'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_ipspaces_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-ipspaces-get-iter',
+ 'attribute': 'net-ipspaces-info',
+ 'key_fields': ('ipspace'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_broadcast_domain_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-broadcast-domain-get-iter',
+ 'attribute': 'net-port-broadcast-domain-info',
+ 'key_fields': ('broadcast-domain', 'ipspace'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_routes_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-routes-get-iter',
+ 'attribute': 'net-vs-routes-info',
+ 'key_fields': ('vserver', 'destination', 'gateway'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_vlan_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-vlan-get-iter',
+ 'attribute': 'vlan-info',
+ 'key_fields': ('interface-name', 'node'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntfs_dacl_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'file-directory-security-ntfs-dacl-get-iter',
+ 'attribute': 'file-directory-security-ntfs-dacl',
+ 'key_fields': ('vserver', 'ntfs-sd', 'account', 'access-type'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntfs_sd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'file-directory-security-ntfs-get-iter',
+ 'attribute': 'file-directory-security-ntfs',
+ 'key_fields': ('vserver', 'ntfs-sd'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntp_server_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ntp-server-get-iter',
+ 'attribute': 'ntp-server-info',
+ 'key_fields': ('server-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'role_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-get-iter',
+ 'attribute': 'security-login-role-info',
+ 'key_fields': ('vserver', 'role-name', 'access-level', 'command-directory-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'service_processor_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'service-processor-get-iter',
+ 'attribute': 'service-processor-info',
+ 'key_fields': ('node'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'service_processor_network_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'service-processor-network-get-iter',
+ 'attribute': 'service-processor-network-info',
+ # don't use key_fieldss, as we cannot build a key with optional key_fieldss
+ # without a key, we'll get a list of dictionaries
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'shelf_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-shelf-info-get-iter',
+ 'attribute': 'storage-shelf-info',
+ 'key_fields': ('shelf-id', 'serial-number'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sis_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'sis-get-iter',
+ 'attribute': 'sis-status-info',
+ 'key_fields': 'path',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sis_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'sis-policy-get-iter',
+ 'attribute': 'sis-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapmirror_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-get-iter',
+ 'attribute': 'snapmirror-info',
+ 'key_fields': 'destination-location',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'snapmirror_destination_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-get-destination-iter',
+ 'attribute': 'snapmirror-destination-info',
+ 'key_fields': 'destination-location',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'snapmirror_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-policy-get-iter',
+ 'attribute': 'snapmirror-policy-info',
+ 'key_fields': ('vserver-name', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapshot_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapshot-policy-get-iter',
+ 'attribute': 'snapshot-policy-info',
+ 'key_fields': ('vserver-name', 'policy'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'subsys_health_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'diagnosis-subsystem-config-get-iter',
+ 'attribute': 'diagnosis-subsystem-config-info',
+ 'key_fields': 'subsystem',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sys_cluster_alerts': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'diagnosis-alert-get-iter',
+ 'attribute': 'diagnosis-alert-info',
+ 'key_fields': ('node', 'alerting-resource'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sysconfig_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-get-node-info-iter',
+ 'attribute': 'system-info',
+ 'key_fields': ('system-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'volume_move_target_aggr_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-move-target-aggr-get-iter',
+ 'attribute': 'volume-move-target-aggr-info',
+ 'query': {'max-records': self.max_records,
+ 'volume-name': volume_move_target_aggr_info.get('volume_name', None),
+ 'vserver': volume_move_target_aggr_info.get('vserver', None)},
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'volume_space_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-space-get-iter',
+ 'attribute': 'space-info',
+ 'key_fields': ('vserver', 'volume'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-status-get-iter',
+ 'attribute': 'vscan-status-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_peer_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-peer-get-iter',
+ 'attribute': 'vserver-peer-info',
+ 'key_fields': ('vserver', 'remote-vserver-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ }
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ # use vserver tunneling if vserver is present (not None)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=module.params['vserver'])
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=True)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
    def call_api(self, call, attributes_list_tag='attributes-list', query=None, fail_on_error=True):
        '''Invoke a ZAPI call and transparently page through 'next-tag' continuations.

        :param call: name of the ZAPI API to invoke.
        :param attributes_list_tag: tag holding the records in the response, or
            None for APIs that return a flat (non-iterated) result.
        :param query: optional dict of top-level children to add to the request.
        :param fail_on_error: when False, return the error instead of failing the module.
        :return: tuple (result NaElement or None, error message or None).
        '''

        api_call = netapp_utils.zapi.NaElement(call)
        initial_result = None
        result = None

        if query:
            for key, val in query.items():
                # Can val be nested?
                api_call.add_new_child(key, val)

        # merge user-provided desired-attributes and query structures into the request
        if self.desired_attributes is not None:
            api_call.translate_struct(self.desired_attributes)
        if self.query is not None:
            api_call.translate_struct(self.query)
        try:
            initial_result = self.server.invoke_successfully(api_call, enable_tunneling=True)
            next_tag = initial_result.get_child_by_name('next-tag')
            # shallow copy: records from later pages are appended into this element below
            result = copy.copy(initial_result)

            while next_tag:
                # re-issue the same call with the continuation tag to fetch the next page
                next_tag_call = netapp_utils.zapi.NaElement(call)
                if query:
                    for key, val in query.items():
                        next_tag_call.add_new_child(key, val)

                next_tag_call.add_new_child("tag", next_tag.get_content(), True)
                next_result = self.server.invoke_successfully(next_tag_call, enable_tunneling=True)

                next_tag = next_result.get_child_by_name('next-tag')
                if attributes_list_tag is None:
                    # a flat (non-iterated) API should never paginate
                    self.module.fail_json(msg="Error calling API %s: %s" %
                                          (api_call.to_string(), "'next-tag' is not expected for this API"))

                # merge this page's records into the accumulated result
                result_attr = result.get_child_by_name(attributes_list_tag)
                new_records = next_result.get_child_by_name(attributes_list_tag)
                if new_records:
                    for record in new_records.get_children():
                        result_attr.add_child_elem(record)

            return result, None

        except netapp_utils.zapi.NaApiError as error:
            if call in ['security-key-manager-key-get-iter']:
                # this API errors out on systems without a key manager; treat as empty result
                return result, None
            kind, error_message = netapp_utils.classify_zapi_exception(error)
            if kind == 'missing_vserver_api_error':
                # for missing_vserver_api_error, the API is already in error_message
                error_message = "Error invalid API. %s" % error_message
            else:
                error_message = "Error calling API %s: %s" % (call, error_message)
            # error_flags comes from the 'continue_on_error' module option
            if self.error_flags[kind] and fail_on_error:
                self.module.fail_json(msg=error_message, exception=traceback.format_exc())
            return None, error_message
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.info_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', key_fields=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query,
+ attributes_list_tag='attributes')
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+ def get_generic_get_iter(self, call, attribute=None, key_fields=None, query=None, attributes_list_tag='attributes-list', fail_on_error=True):
+ '''Method to run a generic get-iter call'''
+
+ generic_call, error = self.call_api(call, attributes_list_tag, query, fail_on_error=fail_on_error)
+
+ if error is not None:
+ return {'error': error}
+
+ if generic_call is None:
+ return None
+
+ if attributes_list_tag is None:
+ attributes_list = generic_call
+ else:
+ attributes_list = generic_call.get_child_by_name(attributes_list_tag)
+
+ if attributes_list is None:
+ return None
+
+ if key_fields is None:
+ out = []
+ else:
+ out = {}
+
+ iteration = 0
+ for child in attributes_list.get_children():
+ iteration += 1
+ dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+ if attribute is not None:
+ dic = dic[attribute]
+
+ info = json.loads(json.dumps(dic))
+ if self.translate_keys:
+ info = convert_keys(info)
+ if isinstance(key_fields, str):
+ try:
+ unique_key = _finditem(dic, key_fields)
+ except KeyError as exc:
+ error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info))
+ if self.error_flags['key_error']:
+ self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+ unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0])
+ elif isinstance(key_fields, tuple):
+ try:
+ unique_key = ':'.join([_finditem(dic, el) for el in key_fields])
+ except KeyError as exc:
+ error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info))
+ if self.error_flags['key_error']:
+ self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+ unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0])
+ else:
+ unique_key = None
+ if unique_key is not None:
+ out = out.copy()
+ out.update({unique_key: info})
+ else:
+ out.append(info)
+
+ if attributes_list_tag is None and key_fields is None:
+ if len(out) == 1:
+ # flatten the list as only 1 element is expected
+ out = out[0]
+ elif len(out) > 1:
+ # aggregate a list of dictionaries into a single dict
+ # make sure we only have dicts and no key duplication
+ dic = dict()
+ key_count = 0
+ for item in out:
+ if not isinstance(item, dict):
+ # abort if we don't see a dict
+ key_count = -1
+ break
+ dic.update(item)
+ key_count += len(item)
+ if key_count == len(dic):
+ # no duplicates!
+ out = dic
+
+ return out
+
+ def send_ems_event(self):
+ ''' use vserver if available, or cluster vserver '''
+ if self.module.params['vserver']:
+ server = self.server
+ else:
+ results = netapp_utils.get_cserver(self.server)
+ if results is None:
+ # most likely we're on a vserver interface already
+ server = self.server
+ else:
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_info", server)
+
+ def get_all(self, gather_subset):
+ '''Method to get all subsets'''
+
+ self.send_ems_event()
+
+ self.netapp_info['ontapi_version'] = self.ontapi()
+ self.netapp_info['ontap_version'] = self.netapp_info['ontapi_version']
+
+ run_subset = self.get_subset(gather_subset, self.netapp_info['ontapi_version'])
+ if 'ontap_version' in gather_subset:
+ if netapp_utils.has_feature(self.module, 'deprecation_warning'):
+ self.netapp_info['deprecation_warning'] = 'ontap_version is deprecated, please use ontapi_version'
+ if 'help' in gather_subset:
+ self.netapp_info['help'] = sorted(run_subset)
+ else:
+ if self.desired_attributes is not None:
+ if len(run_subset) > 1:
+ self.module.fail_json(msg="desired_attributes option is only supported with a single subset")
+ self.sanitize_desired_attributes()
+ if self.query is not None:
+ if len(run_subset) > 1:
+ self.module.fail_json(msg="query option is only supported with a single subset")
+ self.sanitize_query()
+ for subset in run_subset:
+ call = self.info_subsets[subset]
+ self.netapp_info[subset] = call['method'](**call['kwargs'])
+
+ if self.warnings:
+ self.netapp_info['module_warnings'] = self.warnings
+
+ return self.netapp_info
+
+ def get_subset(self, gather_subset, version):
+ '''Method to get a single subset'''
+
+ runable_subsets = set()
+ exclude_subsets = set()
+ usable_subsets = [key for key in self.info_subsets if version >= self.info_subsets[key]['min_version']]
+ if 'help' in gather_subset:
+ return usable_subsets
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(usable_subsets)
+ return runable_subsets
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ return set()
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in usable_subsets:
+ if subset not in self.info_subsets.keys():
+ self.module.fail_json(msg='Bad subset: %s' % subset)
+ self.module.fail_json(msg='Remote system at version %s does not support %s' %
+ (version, subset))
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(usable_subsets)
+
+ runable_subsets.difference_update(exclude_subsets)
+
+ return runable_subsets
+
+ def get_summary(self, ontap_info):
+ for info in ontap_info:
+ if '_info' in info and ontap_info[info] is not None and isinstance(ontap_info[info], dict):
+ # don't summarize errors
+ if 'error' not in ontap_info[info]:
+ ontap_info[info] = ontap_info[info].keys()
+ return ontap_info
+
+ def sanitize_desired_attributes(self):
+ ''' add top 'desired-attributes' if absent
+ check for _ as more likely ZAPI does not take them
+ '''
+ da_key = 'desired-attributes'
+ if da_key not in self.desired_attributes:
+ desired_attributes = dict()
+ desired_attributes[da_key] = self.desired_attributes
+ self.desired_attributes = desired_attributes
+ self.check_for___in_keys(self.desired_attributes)
+
+ def sanitize_query(self):
+ ''' add top 'query' if absent
+ check for _ as more likely ZAPI does not take them
+ '''
+ key = 'query'
+ if key not in self.query:
+ query = dict()
+ query[key] = self.query
+ self.query = query
+ self.check_for___in_keys(self.query)
+
+ def check_for___in_keys(self, d_param):
+ '''Method to warn on underscore in a ZAPI tag'''
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ self.check_for___in_keys(val)
+ if '_' in key:
+ self.warnings.append("Underscore in ZAPI tag: %s, do you mean '-'?" % key)
+ elif isinstance(d_param, list):
+ for val in d_param:
+ self.check_for___in_keys(val)
+
+ def set_error_flags(self):
+ error_flags = self.module.params['continue_on_error']
+ generic_flags = ('always', 'never')
+ if len(error_flags) > 1:
+ for key in generic_flags:
+ if key in error_flags:
+ self.module.fail_json(msg="%s needs to be the only keyword in 'continue_on_error' option." % key)
+ specific_flags = ('rpc_error', 'missing_vserver_api_error', 'key_error', 'other_error')
+ for key in error_flags:
+ if key not in generic_flags and key not in specific_flags:
+ self.module.fail_json(msg="%s is not a valid keyword in 'continue_on_error' option." % key)
+ self.error_flags = dict()
+ for flag in specific_flags:
+ self.error_flags[flag] = True
+ for key in error_flags:
+ if key == 'always' or key == flag:
+ self.error_flags[flag] = False
+
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key in obj:
+ if obj[key] is None:
+ return "None"
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, keys):
+ ''' if keys is a string, use it as a key
+ if keys is a tuple, stop on the first valid key
+ if no valid key is found, raise a KeyError '''
+
+ value = None
+ if isinstance(keys, str):
+ value = __finditem(obj, keys)
+ elif isinstance(keys, tuple):
+ for key in keys:
+ value = __finditem(obj, key)
+ if value is not None:
+ break
+ if value is not None:
+ return value
+ raise KeyError(str(keys))
+
+
+def convert_keys(d_param):
+ '''Method to convert hyphen to underscore'''
+
+ if isinstance(d_param, dict):
+ out = {}
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ return out
+ elif isinstance(d_param, list):
+ return [convert_keys(val) for val in d_param]
+ return d_param
+
+
+def main():
+    '''Module entry point: build the argument spec, gather the requested
+    ONTAP info and exit with the results.'''
+
+    argument_spec = netapp_utils.na_ontap_host_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='info', choices=['info']),
+        gather_subset=dict(default=['all'], type='list', elements='str'),
+        vserver=dict(type='str', required=False),
+        max_records=dict(type='int', default=1024, required=False),
+        summary=dict(type='bool', default=False, required=False),
+        volume_move_target_aggr_info=dict(
+            type="dict",
+            required=False,
+            options=dict(
+                volume_name=dict(type='str', required=True),
+                vserver=dict(type='str', required=True)
+            )
+        ),
+        desired_attributes=dict(type='dict', required=False),
+        use_native_zapi_tags=dict(type='bool', required=False, default=False),
+        continue_on_error=dict(type='list', required=False, elements='str', default=['never']),
+        query=dict(type='dict', required=False),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    # both libraries are needed to turn ZAPI XML output into JSON-friendly dicts
+    if not HAS_XMLTODICT:
+        module.fail_json(msg="xmltodict missing")
+
+    if not HAS_JSON:
+        module.fail_json(msg="json missing")
+
+    state = module.params['state']
+    gather_subset = module.params['gather_subset']
+    summary = module.params['summary']
+    # defensive default: gather everything when no subset was requested
+    if gather_subset is None:
+        gather_subset = ['all']
+    max_records = module.params['max_records']
+    gf_obj = NetAppONTAPGatherInfo(module, max_records)
+    gf_all = gf_obj.get_all(gather_subset)
+    if summary:
+        gf_all = gf_obj.get_summary(gf_all)
+    result = {'state': state, 'changed': False}
+    module.exit_json(ontap_info=gf_all, **result)
+
+
+# Invoke the module entry point when executed directly by Ansible.
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py
new file mode 100644
index 00000000..9896d6d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py
@@ -0,0 +1,613 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_interface
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_interface
+short_description: NetApp ONTAP LIF configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Creating / deleting and modifying the LIF.
+
+options:
+ state:
+ description:
+ - Whether the specified interface should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ interface_name:
+ description:
+ - Specifies the logical interface (LIF) name.
+ required: true
+ type: str
+
+ home_node:
+ description:
+ - Specifies the LIF's home node.
+ - By default, the first node from the cluster is considered as home node
+ type: str
+
+ current_node:
+ description:
+ - Specifies the LIF's current node.
+ - By default, this is home_node
+ type: str
+
+ home_port:
+ description:
+ - Specifies the LIF's home port.
+ - Required when C(state=present)
+ type: str
+
+ current_port:
+ description:
+ - Specifies the LIF's current port.
+ type: str
+
+ role:
+ description:
+ - Specifies the role of the LIF.
+ - When setting role as "intercluster" or "cluster", setting protocol is not supported.
+ - When creating a "cluster" role, the node name will appear as the prefix in the name of LIF.
+ - For example, if the specified name is clif and node name is node1, the LIF name appears in the ONTAP as node1_clif.
+ - Possible values are 'undef', 'cluster', 'data', 'node-mgmt', 'intercluster', 'cluster-mgmt'.
+ - Required when C(state=present) unless service_policy is present and ONTAP version is 9.8 or better.
+ type: str
+
+ address:
+ description:
+ - Specifies the LIF's IP address.
+ - Required when C(state=present) and is_ipv4_link_local if false and subnet_name is not set.
+ type: str
+
+ netmask:
+ description:
+ - Specifies the LIF's netmask.
+ - Required when C(state=present) and is_ipv4_link_local if false and subnet_name is not set.
+ type: str
+
+ is_ipv4_link_local:
+ description:
+ - Specifies the LIF's are to acquire a ipv4 link local address.
+ - Use case for this is when creating Cluster LIFs to allow for auto assignment of ipv4 link local address.
+ version_added: '20.1.0'
+ type: bool
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ firewall_policy:
+ description:
+ - Specifies the firewall policy for the LIF.
+ type: str
+
+ failover_policy:
+ description:
+ - Specifies the failover policy for the LIF.
+ choices: ['disabled', 'system-defined', 'local-only', 'sfo-partner-only', 'broadcast-domain-wide']
+ type: str
+
+ failover_group:
+ description:
+ - Specifies the failover group for the LIF.
+ version_added: '20.1.0'
+ type: str
+
+ subnet_name:
+ description:
+ - Subnet where the interface address is allocated from.
+ - If the option is not used, the IP address will need to be provided by the administrator during configuration.
+ version_added: 2.8.0
+ type: str
+
+ admin_status:
+ choices: ['up', 'down']
+ description:
+ - Specifies the administrative status of the LIF.
+ type: str
+
+ is_auto_revert:
+ description:
+ - If true, data LIF will revert to its home node under certain circumstances such as startup,
+ - and load balancing migration capability is disabled automatically
+ type: bool
+
+ force_subnet_association:
+ description:
+ - Set this to true to acquire the address from the named subnet and assign the subnet to the LIF.
+ version_added: 2.9.0
+ type: bool
+
+ protocols:
+ description:
+ - Specifies the list of data protocols configured on the LIF. By default, the values in this element are nfs, cifs and fcache.
+ - Other supported protocols are iscsi and fcp. A LIF can be configured to not support any data protocols by specifying 'none'.
+ - Protocol values of none, iscsi, fc-nvme or fcp can't be combined with any other data protocol(s).
+ - address, netmask and firewall_policy parameters are not supported for 'fc-nvme' option.
+ type: list
+ elements: str
+
+ dns_domain_name:
+ description:
+ - Specifies the unique, fully qualified domain name of the DNS zone of this LIF.
+ version_added: 2.9.0
+ type: str
+
+ listen_for_dns_query:
+ description:
+ - If True, this IP address will listen for DNS queries for the dnszone specified.
+ version_added: 2.9.0
+ type: bool
+
+ is_dns_update_enabled:
+ description:
+ - Specifies if DNS update is enabled for this LIF. Dynamic updates will be sent for this LIF if updates are enabled at Vserver level.
+ version_added: 2.9.0
+ type: bool
+
+ service_policy:
+ description:
+ - Starting with ONTAP 9.5, you can configure LIF service policies to identify a single service or a list of services that will use a LIF.
+ - In ONTAP 9.5, you can assign service policies only for LIFs in the admin SVM.
+ - In ONTAP 9.6, you can additionally assign service policies for LIFs in the data SVMs.
+ - When you specify a service policy for a LIF, you need not specify the data protocol and role for the LIF.
+ - NOTE that role is still required because of a ZAPI issue. This limitation is removed in ONTAP 9.8.
+ - Creating LIFs by specifying the role and data protocols is also supported.
+ version_added: '20.4.0'
+ type: str
+'''
+
+EXAMPLES = '''
+ - name: Create interface
+ na_ontap_interface:
+ state: present
+ interface_name: data2
+ home_port: e0d
+ home_node: laurentn-vsim1
+ role: data
+ protocols:
+ - nfs
+ - cifs
+ admin_status: up
+ failover_policy: local-only
+ firewall_policy: mgmt
+ is_auto_revert: true
+ address: 10.10.10.10
+ netmask: 255.255.255.0
+ force_subnet_association: false
+ dns_domain_name: test.com
+ listen_for_dns_query: true
+ is_dns_update_enabled: true
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create cluster interface
+ na_ontap_interface:
+ state: present
+ interface_name: cluster_lif
+ home_port: e0a
+ home_node: cluster1-01
+ role: cluster
+ admin_status: up
+ is_auto_revert: true
+ is_ipv4_link_local: true
+ vserver: Cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Migrate an interface
+ na_ontap_interface:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ vserver: ansible
+ https: true
+ validate_certs: false
+ state: present
+ interface_name: carchi_interface3
+ home_port: e0d
+ home_node: ansdev-stor-1
+ current_node: ansdev-stor-2
+ role: data
+ failover_policy: local-only
+ firewall_policy: mgmt
+ is_auto_revert: true
+ address: 10.10.10.12
+ netmask: 255.255.255.0
+ force_subnet_association: false
+ admin_status: up
+
+ - name: Delete interface
+ na_ontap_interface:
+ state: absent
+ interface_name: data2
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+# Cached at import time: True when the required NetApp-Lib python package is importable.
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapInterface(object):
+    ''' Create, delete, modify or migrate an ONTAP logical interface (LIF) via ZAPI. '''
+
+    def __init__(self):
+
+        # Shared ONTAP connection options (hostname, credentials, https, ...)
+        # plus the LIF-specific options documented in DOCUMENTATION above.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=[
+                'present', 'absent'], default='present'),
+            interface_name=dict(required=True, type='str'),
+            home_node=dict(required=False, type='str', default=None),
+            current_node=dict(required=False, type='str'),
+            home_port=dict(required=False, type='str'),
+            current_port=dict(required=False, type='str'),
+            role=dict(required=False, type='str'),
+            is_ipv4_link_local=dict(required=False, type='bool', default=None),
+            address=dict(required=False, type='str'),
+            netmask=dict(required=False, type='str'),
+            vserver=dict(required=True, type='str'),
+            firewall_policy=dict(required=False, type='str', default=None),
+            failover_policy=dict(required=False, type='str', default=None,
+                                 choices=['disabled', 'system-defined',
+                                          'local-only', 'sfo-partner-only', 'broadcast-domain-wide']),
+            failover_group=dict(required=False, type='str'),
+            admin_status=dict(required=False, choices=['up', 'down']),
+            subnet_name=dict(required=False, type='str'),
+            is_auto_revert=dict(required=False, type='bool', default=None),
+            protocols=dict(required=False, type='list', elements='str'),
+            force_subnet_association=dict(required=False, type='bool', default=None),
+            dns_domain_name=dict(required=False, type='str'),
+            listen_for_dns_query=dict(required=False, type='bool'),
+            is_dns_update_enabled=dict(required=False, type='bool'),
+            service_policy=dict(required=False, type='str', default=None)
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            mutually_exclusive=[
+                ['subnet_name', 'address'],
+                ['subnet_name', 'netmask'],
+                ['is_ipv4_link_local', 'address'],
+                ['is_ipv4_link_local', 'netmask'],
+                ['is_ipv4_link_local', 'subnet_name']
+            ],
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(
+                msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_interface(self):
+        """
+        Return details about the interface
+        :param:
+            name : Name of the interface
+
+        :return: Details about the interface. None if not found.
+        :rtype: dict
+        """
+        interface_info = netapp_utils.zapi.NaElement('net-interface-get-iter')
+        interface_attributes = netapp_utils.zapi.NaElement('net-interface-info')
+        interface_attributes.add_new_child('interface-name', self.parameters['interface_name'])
+        interface_attributes.add_new_child('vserver', self.parameters['vserver'])
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(interface_attributes)
+        interface_info.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(interface_info, True)
+        except netapp_utils.zapi.NaApiError as exc:
+            self.module.fail_json(msg='Error fetching interface details for %s: %s' %
+                                  (self.parameters['interface_name'], to_native(exc)),
+                                  exception=traceback.format_exc())
+        return_value = None
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) >= 1:
+
+            interface_attributes = result.get_child_by_name('attributes-list'). \
+                get_child_by_name('net-interface-info')
+            # mandatory fields are read directly; optional fields are only added
+            # when the corresponding child element is present in the response
+            return_value = {
+                'interface_name': self.parameters['interface_name'],
+                'admin_status': interface_attributes['administrative-status'],
+                'home_port': interface_attributes['home-port'],
+                'home_node': interface_attributes['home-node'],
+                'failover_policy': interface_attributes['failover-policy'].replace('_', '-'),
+            }
+            if interface_attributes.get_child_by_name('is-auto-revert'):
+                return_value['is_auto_revert'] = True if interface_attributes['is-auto-revert'] == 'true' else False
+            if interface_attributes.get_child_by_name('failover-group'):
+                return_value['failover_group'] = interface_attributes['failover-group']
+            if interface_attributes.get_child_by_name('address'):
+                return_value['address'] = interface_attributes['address']
+            if interface_attributes.get_child_by_name('netmask'):
+                return_value['netmask'] = interface_attributes['netmask']
+            if interface_attributes.get_child_by_name('firewall-policy'):
+                return_value['firewall_policy'] = interface_attributes['firewall-policy']
+            # NOTE(review): this compares the child NaElement (or None) to the string
+            # 'none', not the child's content - the condition looks always true; confirm intent.
+            if interface_attributes.get_child_by_name('dns-domain-name') != 'none':
+                return_value['dns_domain_name'] = interface_attributes['dns-domain-name']
+            else:
+                return_value['dns_domain_name'] = None
+            if interface_attributes.get_child_by_name('listen-for-dns-query'):
+                return_value['listen_for_dns_query'] = self.na_helper.get_value_for_bool(True, interface_attributes[
+                    'listen-for-dns-query'])
+            if interface_attributes.get_child_by_name('is-dns-update-enabled'):
+                return_value['is_dns_update_enabled'] = self.na_helper.get_value_for_bool(True, interface_attributes[
+                    'is-dns-update-enabled'])
+            if interface_attributes.get_child_by_name('service-policy'):
+                return_value['service_policy'] = interface_attributes['service-policy']
+            if interface_attributes.get_child_by_name('current-node'):
+                return_value['current_node'] = interface_attributes['current-node']
+            if interface_attributes.get_child_by_name('current-port'):
+                return_value['current_port'] = interface_attributes['current-port']
+        return return_value
+
+    @staticmethod
+    def set_options(options, parameters):
+        """ set attributes for create or modify """
+        # Map module parameter names (underscores) to ZAPI tag names (hyphens);
+        # only parameters that were actually supplied are added to the request.
+        if parameters.get('role') is not None:
+            options['role'] = parameters['role']
+        if parameters.get('home_node') is not None:
+            options['home-node'] = parameters['home_node']
+        if parameters.get('home_port') is not None:
+            options['home-port'] = parameters['home_port']
+        if parameters.get('subnet_name') is not None:
+            options['subnet-name'] = parameters['subnet_name']
+        if parameters.get('address') is not None:
+            options['address'] = parameters['address']
+        if parameters.get('netmask') is not None:
+            options['netmask'] = parameters['netmask']
+        if parameters.get('failover_policy') is not None:
+            options['failover-policy'] = parameters['failover_policy']
+        if parameters.get('failover_group') is not None:
+            options['failover-group'] = parameters['failover_group']
+        if parameters.get('firewall_policy') is not None:
+            options['firewall-policy'] = parameters['firewall_policy']
+        if parameters.get('is_auto_revert') is not None:
+            options['is-auto-revert'] = 'true' if parameters['is_auto_revert'] is True else 'false'
+        if parameters.get('admin_status') is not None:
+            options['administrative-status'] = parameters['admin_status']
+        if parameters.get('force_subnet_association') is not None:
+            options['force-subnet-association'] = 'true' if parameters['force_subnet_association'] else 'false'
+        if parameters.get('dns_domain_name') is not None:
+            options['dns-domain-name'] = parameters['dns_domain_name']
+        if parameters.get('listen_for_dns_query') is not None:
+            options['listen-for-dns-query'] = str(parameters['listen_for_dns_query'])
+        if parameters.get('is_dns_update_enabled') is not None:
+            options['is-dns-update-enabled'] = str(parameters['is_dns_update_enabled'])
+        if parameters.get('is_ipv4_link_local') is not None:
+            options['is-ipv4-link-local'] = 'true' if parameters['is_ipv4_link_local'] else 'false'
+        if parameters.get('service_policy') is not None:
+            options['service-policy'] = parameters['service_policy']
+
+    def set_protocol_option(self, required_keys):
+        """ set protocols for create """
+        if self.parameters.get('protocols') is not None:
+            data_protocols_obj = netapp_utils.zapi.NaElement('data-protocols')
+            for protocol in self.parameters.get('protocols'):
+                if protocol.lower() in ['fc-nvme', 'fcp']:
+                    # FC protocols do not take an IP address, so these keys are
+                    # no longer required for create
+                    if 'address' in required_keys:
+                        required_keys.remove('address')
+                    if 'home_port' in required_keys:
+                        required_keys.remove('home_port')
+                    if 'netmask' in required_keys:
+                        required_keys.remove('netmask')
+                    not_required_params = set(['address', 'netmask', 'firewall_policy'])
+                    if not not_required_params.isdisjoint(set(self.parameters.keys())):
+                        # NOTE(review): this branch is also reached for 'fcp', but the
+                        # message only mentions fc-nvme - confirm whether that is intended.
+                        self.module.fail_json(msg='Error: Following parameters for creating interface are not supported'
+                                                  ' for data-protocol fc-nvme: %s' % ', '.join(not_required_params))
+                data_protocols_obj.add_new_child('data-protocol', protocol)
+            return data_protocols_obj
+        return None
+
+    def get_home_node_for_cluster(self):
+        ''' get the first node name from this cluster '''
+        get_node = netapp_utils.zapi.NaElement('cluster-node-get-iter')
+        attributes = {
+            'query': {
+                'cluster-node-info': {}
+            }
+        }
+        get_node.translate_struct(attributes)
+        try:
+            result = self.server.invoke_successfully(get_node, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as exc:
+            # 13003: ZAPI not available in pre-cluster mode - no default home node
+            if str(exc.code) == '13003' or exc.message == 'ZAPI is not enabled in pre-cluster mode.':
+                return None
+            self.module.fail_json(msg='Error fetching node for interface %s: %s' %
+                                  (self.parameters['interface_name'], to_native(exc)),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            attributes = result.get_child_by_name('attributes-list')
+            return attributes.get_child_by_name('cluster-node-info').get_child_content('node-name')
+        return None
+
+    def validate_create_parameters(self, keys):
+        '''
+        Validate if required parameters for create are present.
+        Parameter requirement might vary based on given data-protocol.
+        :return: None
+        '''
+        # default the home node to the first node of the cluster when not given
+        if self.parameters.get('home_node') is None:
+            node = self.get_home_node_for_cluster()
+            if node is not None:
+                self.parameters['home_node'] = node
+        # validate if mandatory parameters are present for create
+        if not keys.issubset(set(self.parameters.keys())) and self.parameters.get('subnet_name') is None:
+            self.module.fail_json(msg='Error: Missing one or more required parameters for creating interface: %s'
+                                  % ', '.join(keys))
+        # if role is intercluster, protocol cannot be specified
+        if self.parameters.get('role') == "intercluster" and self.parameters.get('protocols') is not None:
+            self.module.fail_json(msg='Error: Protocol cannot be specified for intercluster role,'
+                                      'failed to create interface')
+
+    def create_interface(self):
+        ''' calling zapi to create interface '''
+        required_keys = set(['role', 'home_port'])
+        data_protocols_obj = None
+        if self.parameters.get('subnet_name') is None:
+            if self.parameters.get('is_ipv4_link_local') is not None:
+                if not self.parameters.get('is_ipv4_link_local'):
+                    required_keys.add('address')
+                    required_keys.add('netmask')
+        if self.parameters.get('service_policy') is not None:
+            # with a service policy, role is not required (see DOCUMENTATION)
+            required_keys.remove('role')
+        data_protocols_obj = self.set_protocol_option(required_keys)
+        self.validate_create_parameters(required_keys)
+
+        options = {'interface-name': self.parameters['interface_name'],
+                   'vserver': self.parameters['vserver']}
+        NetAppOntapInterface.set_options(options, self.parameters)
+        interface_create = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-create', **options)
+        if data_protocols_obj is not None:
+            interface_create.add_child_elem(data_protocols_obj)
+        try:
+            self.server.invoke_successfully(interface_create, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as exc:
+            # msg: "Error Creating interface ansible_interface: NetApp API failed. Reason - 17:A LIF with the same name already exists"
+            if to_native(exc.code) == "17":
+                # idempotency: a LIF with the same name already exists
+                self.na_helper.changed = False
+            else:
+                self.module.fail_json(msg='Error Creating interface %s: %s' %
+                                      (self.parameters['interface_name'], to_native(exc)),
+                                      exception=traceback.format_exc())
+
+    def delete_interface(self, current_status):
+        ''' calling zapi to delete interface '''
+        if current_status == 'up':
+            # bring the LIF administratively down before deleting it
+            self.parameters['admin_status'] = 'down'
+            self.modify_interface({'admin_status': 'down'})
+
+        interface_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'net-interface-delete', **{'interface-name': self.parameters['interface_name'],
+                                       'vserver': self.parameters['vserver']})
+        try:
+            self.server.invoke_successfully(interface_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as exc:
+            self.module.fail_json(msg='Error deleting interface %s: %s' %
+                                  (self.parameters['interface_name'], to_native(exc)),
+                                  exception=traceback.format_exc())
+
+    def modify_interface(self, modify):
+        """
+        Modify the interface.
+        """
+        # Current_node and current_port don't exist in modify only migrate, so we need to remove them from the list
+        migrate = {}
+        if modify.get('current_node') is not None:
+            migrate['current_node'] = modify.pop('current_node')
+        if modify.get('current_port') is not None:
+            migrate['current_port'] = modify.pop('current_port')
+        if len(modify) > 0:
+            options = {'interface-name': self.parameters['interface_name'],
+                       'vserver': self.parameters['vserver']
+                       }
+            NetAppOntapInterface.set_options(options, modify)
+            interface_modify = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-modify', **options)
+            try:
+                self.server.invoke_successfully(interface_modify, enable_tunneling=True)
+            except netapp_utils.zapi.NaApiError as err:
+                self.module.fail_json(msg='Error modifying interface %s: %s' %
+                                      (self.parameters['interface_name'], to_native(err)),
+                                      exception=traceback.format_exc())
+        # if home node has been changed we need to migrate the interface
+        if len(migrate) > 0:
+            self.migrate_interface()
+
+    def migrate_interface(self):
+        '''Move the LIF to the node/port given by current_node/current_port.'''
+        interface_migrate = netapp_utils.zapi.NaElement('net-interface-migrate')
+        if self.parameters.get('current_node') is None:
+            self.module.fail_json(msg='current_node must be set to migrate')
+        interface_migrate.add_new_child('destination-node', self.parameters['current_node'])
+        if self.parameters.get('current_port') is not None:
+            interface_migrate.add_new_child('destination-port', self.parameters['current_port'])
+        interface_migrate.add_new_child('lif', self.parameters['interface_name'])
+        interface_migrate.add_new_child('vserver', self.parameters['vserver'])
+        try:
+            self.server.invoke_successfully(interface_migrate, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching migrating %s: %s'
+                                  % (self.parameters['current_node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        '''Send an EMS log event to the cluster admin vserver for auditing.'''
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event("na_ontap_interface", cserver)
+
+    def apply(self):
+        ''' calling all interface features '''
+
+        # Checking to see if autosupport_log() can be ran as this is a post cluster setup request.
+        try:
+            self.autosupport_log()
+        except netapp_utils.zapi.NaApiError as error:
+            # Error 13003 denotes cluster does not exist. It happens when running operations on a node not in cluster.
+            if to_native(error.code) == "13003":
+                pass
+            else:
+                self.module.fail_json(msg='Error calling autosupport_log(): %s' % (to_native(error)),
+                                      exception=traceback.format_exc())
+        current = self.get_interface()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_interface()
+                elif cd_action == 'delete':
+                    self.delete_interface(current['admin_status'])
+                elif modify:
+                    self.modify_interface(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ interface = NetAppOntapInterface()
+ interface.apply()
+
+
+# Invoke the module entry point when executed directly by Ansible.
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py
new file mode 100644
index 00000000..02ceb7c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+"""
+this is ipspace module
+
+# (c) 2018, NTT Europe Ltd.
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_ipspace
+
+short_description: NetApp ONTAP Manage an ipspace
+
+version_added: 2.9.0
+
+author:
+ - NTTE Storage Engineering (@vicmunoz) <cl.eng.sto@ntt.eu>
+
+description:
+ - Manage an ipspace for an Ontap Cluster
+
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+
+options:
+ state:
+ description:
+ - Whether the specified ipspace should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the ipspace to manage
+ required: true
+ type: str
+ from_name:
+ description:
+ - Name of the existing ipspace to be renamed to name
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create ipspace
+ na_ontap_ipspace:
+ state: present
+ name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete ipspace
+ na_ontap_ipspace:
+ state: absent
+ name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Rename ipspace
+ na_ontap_ipspace:
+ state: present
+ name: ansibleIpspace_newname
+ from_name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIpspace(object):
+    '''Class with ipspace operations'''
+
+    def __init__(self):
+        # collect module arguments, build the Ansible module, and pick the
+        # transport: REST when the cluster supports it, ZAPI otherwise.
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+            from_name=dict(required=False, type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        if self.rest_api.is_rest():
+            self.use_rest = True
+        else:
+            # ZAPI fallback requires the netapp_lib python package
+            if HAS_NETAPP_LIB is False:
+                self.module.fail_json(
+                    msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        return
+
+    def ipspace_get_iter(self, name):
+        """
+        Return net-ipspaces-get-iter query results
+        :param name: Name of the ipspace
+        :return: NaElement if ipspace found, None otherwise
+        """
+        ipspace_get_iter = netapp_utils.zapi.NaElement('net-ipspaces-get-iter')
+        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+            'net-ipspaces-info', **{'ipspace': name})
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(query_details)
+        ipspace_get_iter.add_child_elem(query)
+        try:
+            # enable_tunneling=False: this query is sent at cluster level
+            result = self.server.invoke_successfully(
+                ipspace_get_iter, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            # Error 14636 denotes an ipspace does not exist
+            # Error 13073 denotes an ipspace not found
+            if to_native(error.code) == "14636" or to_native(error.code) == "13073":
+                return None
+            else:
+                self.module.fail_json(
+                    msg=to_native(error),
+                    exception=traceback.format_exc())
+        return result
+
+    def get_ipspace(self, name=None):
+        """
+        Fetch details if ipspace exists
+        :param name: Name of the ipspace to be fetched
+        :return:
+            Dictionary of current details if ipspace found
+            None if ipspace is not found
+        """
+        if name is None:
+            name = self.parameters['name']
+        if self.use_rest:
+            # REST GET returns all ipspaces; the match on name is done client-side below
+            api = 'network/ipspaces'
+            params = None
+            message, error = self.rest_api.get(api, params)
+            if error:
+                self.module.fail_json(msg=error)
+            # empty response body or an empty 'records' list: no ipspace exists
+            if len(message.keys()) == 0:
+                return None
+            elif 'records' in message and len(message['records']) == 0:
+                return None
+            elif 'records' not in message:
+                error = "Unexpected response from %s: %s" % (api, repr(message))
+                self.module.fail_json(msg=error)
+            for record in message['records']:
+                if record['name'] == name:
+                    return record
+            return None
+        else:
+            ipspace_get = self.ipspace_get_iter(name)
+            if (ipspace_get and ipspace_get.get_child_by_name('num-records') and
+                    int(ipspace_get.get_child_content('num-records')) >= 1):
+                # only the name is tracked for ZAPI; no other attribute is modifiable here
+                current_ipspace = dict()
+                attr_list = ipspace_get.get_child_by_name('attributes-list')
+                attr = attr_list.get_child_by_name('net-ipspaces-info')
+                current_ipspace['name'] = attr.get_child_content('ipspace')
+                return current_ipspace
+            return None
+
+    def create_ipspace(self):
+        """
+        Create ipspace
+        :return: None
+        """
+        if self.use_rest:
+            api = 'network/ipspaces'
+            params = {'name': self.parameters['name']}
+            dummy, error = self.rest_api.post(api, params)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            ipspace_create = netapp_utils.zapi.NaElement.create_node_with_children(
+                'net-ipspaces-create', **{'ipspace': self.parameters['name']})
+            try:
+                self.server.invoke_successfully(ipspace_create,
+                                                enable_tunneling=False)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg="Error provisioning ipspace %s: %s" % (
+                        self.parameters['name'],
+                        to_native(error)),
+                    exception=traceback.format_exc())
+
+    def delete_ipspace(self):
+        """
+        Destroy ipspace
+        :return: None
+        """
+        if self.use_rest:
+            # a second lookup is needed to obtain the ipspace UUID for the REST URL
+            current = self.get_ipspace()
+            if current is not None:
+                uuid = current['uuid']
+                api = 'network/ipspaces/' + uuid
+                dummy, error = self.rest_api.delete(api)
+                if error:
+                    self.module.fail_json(msg=error)
+        else:
+            ipspace_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+                'net-ipspaces-destroy',
+                **{'ipspace': self.parameters['name']})
+            try:
+                self.server.invoke_successfully(
+                    ipspace_destroy, enable_tunneling=False)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg="Error removing ipspace %s: %s" % (
+                        self.parameters['name'],
+                        to_native(error)),
+                    exception=traceback.format_exc())
+
+    def rename_ipspace(self):
+        """
+        Rename an ipspace
+        :return: Nothing
+        """
+        if self.use_rest:
+            # the UUID of the source ipspace (from_name) is required for the PATCH URL
+            current = self.get_ipspace(self.parameters['from_name'])
+            if current is None:
+                self.module.fail_json(msg="Error renaming ipspace %s" % (self.parameters['from_name']))
+            uuid = current['uuid']
+            api = 'network/ipspaces/' + uuid
+            params = {'name': self.parameters['name']}
+            dummy, error = self.rest_api.patch(api, params)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            ipspace_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+                'net-ipspaces-rename',
+                **{'ipspace': self.parameters['from_name'],
+                   'new-name': self.parameters['name']})
+            try:
+                self.server.invoke_successfully(ipspace_rename,
+                                                enable_tunneling=False)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg="Error renaming ipspace %s: %s" % (
+                        self.parameters['from_name'],
+                        to_native(error)),
+                    exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Apply action to the ipspace
+        :return: Nothing
+        """
+        current = self.get_ipspace()
+        # rename and create are mutually exclusive
+        rename, cd_action = None, None
+        if self.parameters.get('from_name'):
+            # is_rename_action returns None when neither from_name nor name exists
+            rename = self.na_helper.is_rename_action(
+                self.get_ipspace(self.parameters['from_name']),
+                current)
+            if rename is None:
+                self.module.fail_json(
+                    msg="Error renaming: ipspace %s does not exist" %
+                    self.parameters['from_name'])
+        else:
+            cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed:
+            # check mode reports the pending change without executing it
+            if self.module.check_mode:
+                pass
+            else:
+                if rename:
+                    self.rename_ipspace()
+                elif cd_action == 'create':
+                    self.create_ipspace()
+                elif cd_action == 'delete':
+                    self.delete_ipspace()
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action
+ :return: nothing
+ """
+ obj = NetAppOntapIpspace()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py
new file mode 100644
index 00000000..8975327a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+
+# (c) 2017-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_iscsi
+
+short_description: NetApp ONTAP manage iSCSI service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- create, delete, start, stop iSCSI service on SVM.
+
+options:
+
+ state:
+ description:
+ - Whether the service should be present or deleted.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ service_state:
+ description:
+    - Whether the specified service should be running.
+ choices: ['started', 'stopped']
+ type: str
+
+ vserver:
+ required: true
+ type: str
+ description:
+ - The name of the vserver to use.
+
+'''
+
+EXAMPLES = """
+- name: Create iscsi service
+ na_ontap_iscsi:
+ state: present
+ service_state: started
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Stop Iscsi service
+ na_ontap_iscsi:
+ state: present
+ service_state: stopped
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete Iscsi service
+ na_ontap_iscsi:
+ state: absent
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapISCSI(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, type='str', choices=['started', 'stopped'], default=None),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ params = self.module.params
+
+ # set up state variables
+ self.state = params['state']
+ self.service_state = params['service_state']
+ if self.state == 'present' and self.service_state is None:
+ self.service_state = 'started'
+ self.vserver = params['vserver']
+ self.is_started = None
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.vserver)
+
+ def get_iscsi(self):
+ """
+ Return details about the iscsi service
+
+ :return: Details about the iscsi service
+ :rtype: dict
+ """
+ iscsi_info = netapp_utils.zapi.NaElement('iscsi-service-get-iter')
+ iscsi_attributes = netapp_utils.zapi.NaElement('iscsi-service-info')
+
+ iscsi_attributes.add_new_child('vserver', self.vserver)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(iscsi_attributes)
+
+ iscsi_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(iscsi_info, True)
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ iscsi = result.get_child_by_name(
+ 'attributes-list').get_child_by_name('iscsi-service-info')
+ if iscsi:
+ is_started = iscsi.get_child_content('is-available') == 'true'
+ return_value = {
+ 'is_started': is_started
+ }
+
+ return return_value
+
+ def create_iscsi_service(self):
+ """
+ Create iscsi service and start if requested
+ """
+ iscsi_service = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-create',
+ **{'start': 'true' if self.state == 'started' else 'false'
+ })
+
+ try:
+ self.server.invoke_successfully(
+ iscsi_service, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error creating iscsi service: % s"
+ % (to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_iscsi_service(self):
+ """
+ Delete the iscsi service
+ """
+ if self.is_started:
+ self.stop_iscsi_service()
+
+ iscsi_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-destroy')
+
+ try:
+ self.server.invoke_successfully(
+ iscsi_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting iscsi service \
+ on vserver %s: %s"
+ % (self.vserver, to_native(e)),
+ exception=traceback.format_exc())
+
+ def stop_iscsi_service(self):
+ """
+ Stop iscsi service
+ """
+
+ iscsi_stop = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-stop')
+
+ try:
+ self.server.invoke_successfully(iscsi_stop, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error Stopping iscsi service \
+ on vserver %s: %s"
+ % (self.vserver, to_native(e)),
+ exception=traceback.format_exc())
+
+ def start_iscsi_service(self):
+ """
+ Start iscsi service
+ """
+ iscsi_start = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-start')
+
+ try:
+ self.server.invoke_successfully(iscsi_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error starting iscsi service \
+ on vserver %s: %s"
+ % (self.vserver, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ property_changed = False
+ iscsi_service_exists = False
+ netapp_utils.ems_log_event("na_ontap_iscsi", self.server)
+ iscsi_service_detail = self.get_iscsi()
+
+ if iscsi_service_detail:
+ self.is_started = iscsi_service_detail['is_started']
+ iscsi_service_exists = True
+
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ is_started = 'started' if self.is_started else 'stopped'
+ property_changed = is_started != self.service_state
+
+ else:
+ if self.state == 'present':
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not iscsi_service_exists:
+ self.create_iscsi_service() # the service is stopped when initially created
+ if self.service_state == 'started':
+ self.start_iscsi_service()
+ if iscsi_service_exists and self.service_state == 'stopped':
+ self.stop_iscsi_service()
+
+ elif self.state == 'absent':
+ self.delete_iscsi_service()
+
+ changed = property_changed
+ # TODO: include other details about the lun (size, etc.)
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppOntapISCSI()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py
new file mode 100644
index 00000000..b1b16cd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_iscsi_security
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify iscsi security.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_iscsi_security
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified initiator should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ auth_type:
+ description:
+ - Specifies the authentication type.
+ choices: ['chap', 'none', 'deny']
+ type: str
+ initiator:
+ description:
+ - Specifies the name of the initiator.
+ required: true
+ type: str
+ address_ranges:
+ description:
+    - May be a single IPv4 or IPv6 address or a range containing a start address and an end address.
+ - The start and end addresses themselves are included in the range.
+ - If not present, the initiator is allowed to log in from any IP address.
+ type: list
+ elements: str
+ inbound_username:
+ description:
+ - Inbound CHAP username.
+ - Required for CHAP. A null username is not allowed.
+ type: str
+ inbound_password:
+ description:
+ - Inbound CHAP user password.
+ - Can not be modified. If want to change password, delete and re-create the initiator.
+ type: str
+ outbound_username:
+ description:
+ - Outbound CHAP user name.
+ type: str
+ outbound_password:
+ description:
+ - Outbound CHAP user password.
+ - Can not be modified. If want to change password, delete and re-create the initiator.
+ type: str
+short_description: "NetApp ONTAP Manage iscsi security."
+version_added: "19.10.1"
+'''
+
+EXAMPLES = """
+ - name: create
+ na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ inbound_username: user_1
+ inbound_password: password_1
+ outbound_username: user_2
+ outbound_password: password_2
+ auth_type: chap
+ address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78
+
+ - name: modify outbound username
+ na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ inbound_username: user_1
+ inbound_password: password_1
+ outbound_username: user_out_3
+ outbound_password: password_3
+ auth_type: chap
+ address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78
+
+ - name: modify address
+ na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ address_ranges: 10.125.193.90,10.125.10.20-10.125.10.30
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPIscsiSecurity(object):
+ """
+ Class with iscsi security methods
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ auth_type=dict(required=False, type='str', choices=['chap', 'none', 'deny']),
+ inbound_password=dict(required=False, type='str', no_log=True),
+ inbound_username=dict(required=False, type='str'),
+ initiator=dict(required=True, type='str'),
+ address_ranges=dict(required=False, type='list', elements='str'),
+ outbound_password=dict(required=False, type='str', no_log=True),
+ outbound_username=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['auth_type', 'chap', ['inbound_username', 'inbound_password']]
+ ],
+ required_together=[
+ ['inbound_username', 'inbound_password'],
+ ['outbound_username', 'outbound_password'],
+ ],
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.uuid = self.get_svm_uuid()
+
+ def get_initiator(self):
+ """
+ Get current initiator.
+ :return: dict of current initiator details.
+ """
+ params = {'fields': '*', 'initiator': self.parameters['initiator']}
+ api = '/protocols/san/iscsi/credentials/'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching initiator: %s" % error)
+ if message['num_records'] > 0:
+ record = message['records'][0]
+ initiator_details = dict()
+ initiator_details['auth_type'] = record['authentication_type']
+ if initiator_details['auth_type'] == 'chap':
+ if record['chap'].get('inbound'):
+ initiator_details['inbound_username'] = record['chap']['inbound']['user']
+ else:
+ initiator_details['inbound_username'] = None
+ if record['chap'].get('outbound'):
+ initiator_details['outbound_username'] = record['chap']['outbound']['user']
+ else:
+ initiator_details['outbound_username'] = None
+ if record.get('initiator_address'):
+ if record['initiator_address'].get('ranges'):
+ ranges = []
+ for address_range in record['initiator_address']['ranges']:
+ if address_range['start'] == address_range['end']:
+ ranges.append(address_range['start'])
+ else:
+ ranges.append(address_range['start'] + '-' + address_range['end'])
+ initiator_details['address_ranges'] = ranges
+ else:
+ initiator_details['address_ranges'] = None
+ return initiator_details
+
+ def create_initiator(self):
+ """
+ Create initiator.
+ :return: None.
+ """
+ params = dict()
+ params['authentication_type'] = self.parameters['auth_type']
+ params['initiator'] = self.parameters['initiator']
+ if self.parameters['auth_type'] == 'chap':
+ chap_info = dict()
+ chap_info['inbound'] = {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}
+ if self.parameters.get('outbound_username'):
+ chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
+ params['chap'] = chap_info
+ address_info = self.get_address_info(self.parameters.get('address_ranges'))
+ if address_info is not None:
+ params['initiator_address'] = {'ranges': address_info}
+ params['svm'] = {'uuid': self.uuid, 'name': self.parameters['vserver']}
+ api = '/protocols/san/iscsi/credentials'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating initiator: %s" % error)
+
+ def delete_initiator(self):
+ """
+ Delete initiator.
+ :return: None.
+ """
+ api = '/protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting initiator: %s" % error)
+
+ def modify_initiator(self, modify, current):
+ """
+ Modify initiator.
+ :param modify: dict of modify attributes.
+ :return: None.
+ """
+ params = dict()
+ use_chap = False
+ chap_update = False
+ chap_update_inbound = False
+ chap_update_outbound = False
+
+ if modify.get('auth_type') and modify['auth_type'] == 'chap':
+ # change in auth_type
+ chap_update = True
+ use_chap = True
+ elif current.get('auth_type') == 'chap':
+ # we're already using chap
+ use_chap = True
+
+ if use_chap and (modify.get('inbound_username') or modify.get('inbound_password')):
+ # change in chap inbound credentials
+ chap_update = True
+ chap_update_inbound = True
+
+ if use_chap and (modify.get('outbound_username') or modify.get('outbound_password')):
+ # change in chap outbound credentials
+ chap_update = True
+ chap_update_outbound = True
+
+ if chap_update:
+ chap_info = dict()
+ # set values from self.parameters as they may not show as modified
+ if chap_update_inbound:
+ chap_info['inbound'] = {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}
+ else:
+ # use current values as inbound username/password are required
+ chap_info['inbound'] = {'user': current.get('inbound_username'), 'password': current.get('inbound_password')}
+ if chap_update_outbound:
+ chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
+
+ params['chap'] = chap_info
+ address_info = self.get_address_info(modify.get('address_ranges'))
+ if address_info is not None:
+ params['initiator_address'] = {'ranges': address_info}
+ api = '/protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
+ dummy, error = self.rest_api.patch(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying initiator: %s" % error)
+
+ def get_address_info(self, address_ranges):
+ if address_ranges is None:
+ return None
+ else:
+ address_info = []
+ for address in address_ranges:
+ address_range = {}
+ if '-' in address:
+ address_range['end'] = address.split('-')[1]
+ address_range['start'] = address.split('-')[0]
+ else:
+ address_range['end'] = address
+ address_range['start'] = address
+ address_info.append(address_range)
+ return address_info
+
+ def apply(self):
+ """
+ check create/delete/modify operations if needed.
+ :return: None.
+ """
+ current = self.get_initiator()
+ action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if action == 'create':
+ self.create_initiator()
+ elif action == 'delete':
+ self.delete_initiator()
+ elif modify:
+ self.modify_initiator(modify, current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def get_svm_uuid(self):
+ """
+ Get a svm's UUID
+ :return: uuid of the svm.
+ """
+ params = {'fields': 'uuid', 'name': self.parameters['vserver']}
+ api = "svm/svms"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
+ return message['records'][0]['uuid']
+
+
+def main():
+ """Execute action"""
+ iscsi_obj = NetAppONTAPIscsiSecurity()
+ iscsi_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py
new file mode 100644
index 00000000..db2c3a1f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_job_schedule
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_job_schedule
+short_description: NetApp ONTAP Job Schedule
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Delete/Modify job-schedules on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified job schedule should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the job-schedule to manage.
+ required: true
+ type: str
+ job_minutes:
+ description:
+ - The minute(s) of each hour when the job should be run.
+ Job Manager cron scheduling minute.
+ -1 represents all minutes and is
+ only supported for cron schedule create and modify.
+ Range is [-1..59]
+ type: list
+ elements: str
+ job_hours:
+ version_added: 2.8.0
+ description:
+ - The hour(s) of the day when the job should be run.
+ Job Manager cron scheduling hour.
+ -1 represents all hours and is
+ only supported for cron schedule create and modify.
+ Range is [-1..23]
+ type: list
+ elements: str
+ job_months:
+ version_added: 2.8.0
+ description:
+ - The month(s) when the job should be run.
+ Job Manager cron scheduling month.
+ -1 represents all months and is
+ only supported for cron schedule create and modify.
+ Range is [-1..11]
+ type: list
+ elements: str
+ job_days_of_month:
+ version_added: 2.8.0
+ description:
+ - The day(s) of the month when the job should be run.
+ Job Manager cron scheduling day of month.
+ -1 represents all days of a month from 1 to 31, and is
+ only supported for cron schedule create and modify.
+ Range is [-1..31]
+ type: list
+ elements: str
+ job_days_of_week:
+ version_added: 2.8.0
+ description:
+ - The day(s) in the week when the job should be run.
+ Job Manager cron scheduling day of week.
+ Zero represents Sunday. -1 represents all days of a week and is
+ only supported for cron schedule create and modify.
+ Range is [-1..6]
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: Create Job for 11.30PM at 10th of every month
+ na_ontap_job_schedule:
+ state: present
+ name: jobName
+ job_minutes: 30
+ job_hours: 23
+ job_days_of_month: 10
+ job_months: -1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete Job
+ na_ontap_job_schedule:
+ state: absent
+ name: jobName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPJob(object):
    '''Class with job schedule cron methods

    Manages ONTAP cron job schedules over REST when available, falling back
    to ZAPI otherwise.  Playbook cron options are lists of strings; the value
    '-1' means "all" (ZAPI convention), which maps to an omitted/empty field
    in the REST API.
    '''

    def __init__(self):

        # Prefer the REST API when the cluster supports it; toggled below.
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            job_minutes=dict(required=False, type='list', elements='str'),
            job_months=dict(required=False, type='list', elements='str'),
            job_hours=dict(required=False, type='list', elements='str'),
            job_days_of_month=dict(required=False, type='list', elements='str'),
            job_days_of_week=dict(required=False, type='list', elements='str')
        ))

        # Schedule UUID, filled in by get_job_schedule() on the REST path and
        # later used for PATCH/DELETE URLs.
        self.uuid = None
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.set_playbook_zapi_key_map()
        self.set_playbook_api_key_map()

        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            # The ZAPI fallback requires the netapp-lib python package.
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(
                    msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def set_playbook_zapi_key_map(self):
        """Map playbook option names to ZAPI element names."""
        self.na_helper.zapi_string_keys = {
            'name': 'job-schedule-name',
        }
        # For list options the value is a (parent element, child element) pair.
        self.na_helper.zapi_list_keys = {
            'job_minutes': ('job-schedule-cron-minute', 'cron-minute'),
            'job_months': ('job-schedule-cron-month', 'cron-month'),
            'job_hours': ('job-schedule-cron-hour', 'cron-hour'),
            'job_days_of_month': ('job-schedule-cron-day', 'cron-day-of-month'),
            'job_days_of_week': ('job-schedule-cron-day-of-week', 'cron-day-of-week')
        }

    def set_playbook_api_key_map(self):
        """Map playbook option names to REST 'cron' field names."""
        self.na_helper.api_list_keys = {
            'job_minutes': 'minutes',
            'job_months': 'months',
            'job_hours': 'hours',
            'job_days_of_month': 'days',
            'job_days_of_week': 'weekdays'
        }

    def get_job_schedule(self):
        """
        Return details about the job
        :param:
            name : Job name
        :return: Details about the Job. None if not found.
        :rtype: dict
        """
        if self.use_rest:
            params = {'name': self.parameters['name']}
            api = '/cluster/schedules'
            message, error = self.rest_api.get(api, params)
            if error is not None:
                self.module.fail_json(msg="Error on fetching job schedule: %s" % error)
            if message['num_records'] > 0:
                # Remember the uuid for later modify/delete calls.
                self.uuid = message['records'][0]['uuid']
                job_details = dict()
                job_details['name'] = message['records'][0]['name']
                for key, value in self.na_helper.api_list_keys.items():
                    if value in message['records'][0]['cron']:
                        job_details[key] = message['records'][0]['cron'][value]
                # convert list of int to list of string
                for key, value in job_details.items():
                    if isinstance(value, list):
                        job_details[key] = [str(x) for x in value]
                return job_details
            # no matching record: fall through and return None implicitly

        else:
            job_get_iter = netapp_utils.zapi.NaElement('job-schedule-cron-get-iter')
            job_get_iter.translate_struct({
                'query': {
                    'job-schedule-cron-info': {
                        'job-schedule-name': self.parameters['name']
                    }
                }
            })
            result = self.server.invoke_successfully(job_get_iter, True)
            job_details = None
            # check if job exists
            if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
                job_info = result['attributes-list']['job-schedule-cron-info']
                job_details = dict()
                for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
                    job_details[item_key] = job_info[zapi_key]
                for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
                    parent, dummy = zapi_key
                    job_details[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
                                                                              zapi_parent=job_info.get_child_by_name(parent)
                                                                              )
                    # if any of the job_hours, job_minutes, job_months, job_days are empty:
                    # it means the value is -1 for ZAPI
                    if not job_details[item_key]:
                        job_details[item_key] = ['-1']
            return job_details

    def add_job_details(self, na_element_object, values):
        """
        Add children node for create or modify NaElement object
        :param na_element_object: modify or create NaElement object
        :param values: dictionary of cron values to be added
        :return: None
        """
        for item_key in values:
            if item_key in self.na_helper.zapi_string_keys:
                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
                na_element_object[zapi_key] = values[item_key]
            elif item_key in self.na_helper.zapi_list_keys:
                parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
                na_element_object.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
                                                                                   zapi_parent=parent_key,
                                                                                   zapi_child=child_key,
                                                                                   data=values.get(item_key)))

    def create_job_schedule(self):
        """
        Creates a job schedule
        """
        # job_minutes is mandatory for create
        if self.parameters.get('job_minutes') is None:
            self.module.fail_json(msg='Error: missing required parameter job_minutes for create')

        if self.use_rest:
            cron = dict()
            for key, value in self.na_helper.api_list_keys.items():
                # -1 means all in zapi, while empty means all in api.
                if self.parameters.get(key):
                    if len(self.parameters[key]) == 1 and int(self.parameters[key][0]) == -1:
                        # need to set empty value for minutes as this is required parameter
                        if value == 'minutes':
                            cron[value] = []
                    else:
                        cron[value] = self.parameters[key]

            params = {
                'name': self.parameters['name'],
                'cron': cron
            }
            api = '/cluster/schedules'
            dummy, error = self.rest_api.post(api, params)
            if error is not None:
                self.module.fail_json(msg="Error on creating job schedule: %s" % error)

        else:
            job_schedule_create = netapp_utils.zapi.NaElement('job-schedule-cron-create')
            self.add_job_details(job_schedule_create, self.parameters)
            try:
                self.server.invoke_successfully(job_schedule_create,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_job_schedule(self):
        """
        Delete a job schedule
        """
        if self.use_rest:
            # self.uuid was captured by get_job_schedule().
            api = '/cluster/schedules/' + self.uuid
            dummy, error = self.rest_api.delete(api)
            if error is not None:
                self.module.fail_json(msg="Error on deleting job schedule: %s" % error)
        else:
            job_schedule_delete = netapp_utils.zapi.NaElement('job-schedule-cron-destroy')
            self.add_job_details(job_schedule_delete, self.parameters)
            try:
                self.server.invoke_successfully(job_schedule_delete,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_job_schedule(self, params, current):
        """
        modify a job schedule
        :param params: dict of attributes to change (subset of self.parameters)
        :param current: dict of the schedule's current attributes
        """
        if self.use_rest:
            cron = dict()
            for key, value in self.na_helper.api_list_keys.items():
                # -1 means all in zapi, while empty means all in api.
                if params.get(key):
                    if len(self.parameters[key]) == 1 and int(self.parameters[key][0]) == -1:
                        pass
                    else:
                        cron[value] = self.parameters[key]
                # Usually only include modify attributes, but omitting an attribute means all in api.
                # Need to add the current attributes in params.
                elif current.get(key):
                    cron[value] = current[key]
            params = {
                'cron': cron
            }
            api = '/cluster/schedules/' + self.uuid
            dummy, error = self.rest_api.patch(api, params)
            if error is not None:
                self.module.fail_json(msg="Error on modifying job schedule: %s" % error)
        else:
            job_schedule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
                'job-schedule-cron-modify', **{'job-schedule-name': self.parameters['name']})
            self.add_job_details(job_schedule_modify, params)
            try:
                self.server.invoke_successfully(job_schedule_modify, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def autosupport_log(self):
        """
        Autosupport log for job_schedule
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_job_schedule", cserver)

    def apply(self):
        """
        Apply action to job-schedule
        """
        # EMS logging uses ZAPI, so it is only done on the non-REST path.
        if not self.use_rest:
            self.autosupport_log()
        current = self.get_job_schedule()
        action = self.na_helper.get_cd_action(current, self.parameters)
        # modify is only computed when the schedule exists and should stay;
        # the branches below never read it otherwise.
        if action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if action == 'create':
                    self.create_job_schedule()
                elif action == 'delete':
                    self.delete_job_schedule()
                elif modify:
                    self.modify_job_schedule(modify, current)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    '''Module entry point: build the job schedule object and apply it.'''
    NetAppONTAPJob().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py
new file mode 100644
index 00000000..6517f4b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+'''
+(c) 2019, Red Hat, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_ontap_kerberos_realm
+
+short_description: NetApp ONTAP vserver nfs kerberos realm
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>,<mzink@redhat.com>
+
+description:
+- Create, modify or delete vserver kerberos realm configuration
+
+options:
+
+ state:
+ description:
+ - Whether the Kerberos realm is present or absent.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm with kerberos realm configured
+ required: true
+ type: str
+
+ realm:
+ description:
+ - Kerberos realm name
+ required: true
+ type: str
+
+ kdc_vendor:
+ description:
+ - The vendor of the Key Distribution Centre (KDC) server
+ - Required if I(state=present)
+ choices: ['other', 'microsoft']
+ type: str
+
+ kdc_ip:
+ description:
+ - IP address of the Key Distribution Centre (KDC) server
+ - Required if I(state=present)
+ type: str
+
+ kdc_port:
+ description:
+ - TCP port on the KDC to be used for Kerberos communication.
+ - The default for this parameter is '88'.
+ type: str
+
+ clock_skew:
+ description:
+ - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock.
+ - The default for this parameter is '5' minutes.
+ type: str
+
+ comment:
+ description:
+ - Optional comment
+ type: str
+
+ admin_server_ip:
+ description:
+ - IP address of the host where the Kerberos administration daemon is running. This is usually the master KDC.
+ - If this parameter is omitted, the address specified in kdc_ip is used.
+ type: str
+
+ admin_server_port:
+ description:
+ - The TCP port on the Kerberos administration server where the Kerberos administration service is running.
      - The default for this parameter is '749'.
+ type: str
+
+ pw_server_ip:
+ description:
+ - IP address of the host where the Kerberos password-changing server is running.
+ - Typically, this is the same as the host indicated in the adminserver-ip.
+ - If this parameter is omitted, the IP address in kdc-ip is used.
+ type: str
+
+ pw_server_port:
+ description:
+ - The TCP port on the Kerberos password-changing server where the Kerberos password-changing service is running.
+ - The default for this parameter is '464'.
+ type: str
+
+ ad_server_ip:
+ description:
+ - IP Address of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'.
+ type: str
+ version_added: '20.4.0'
+
+ ad_server_name:
+ description:
+ - Host name of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'.
+ type: str
+ version_added: '20.4.0'
+'''
+
+EXAMPLES = '''
+
+ - name: Create kerberos realm other kdc vendor
+ na_ontap_kerberos_realm:
+ state: present
+ realm: 'EXAMPLE.COM'
+ vserver: 'vserver1'
+ kdc_ip: '1.2.3.4'
+ kdc_vendor: 'other'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create kerberos realm Microsoft kdc vendor
+ na_ontap_kerberos_realm:
+ state: present
+ realm: 'EXAMPLE.COM'
+ vserver: 'vserver1'
+ kdc_ip: '1.2.3.4'
+ kdc_vendor: 'microsoft'
+ ad_server_ip: '0.0.0.0'
+ ad_server_name: 'server'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapKerberosRealm(object):
    '''
    Kerberos Realm definition class

    Creates, modifies or deletes a vserver NFS Kerberos realm configuration
    through ZAPI calls.
    '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            admin_server_ip=dict(required=False, type='str'),
            admin_server_port=dict(required=False, type='str'),
            clock_skew=dict(required=False, type='str'),
            comment=dict(required=False, type='str'),
            kdc_ip=dict(required=False, type='str'),
            kdc_port=dict(required=False, type='str'),
            kdc_vendor=dict(required=False, type='str',
                            choices=['microsoft', 'other']),
            pw_server_ip=dict(required=False, type='str'),
            pw_server_port=dict(required=False, type='str'),
            realm=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            ad_server_ip=dict(required=False, type='str'),
            ad_server_name=dict(required=False, type='str')
        ))

        # kdc_vendor/kdc_ip are mandatory for state=present; AD details are
        # mandatory when the KDC vendor is microsoft.
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_if=[('state', 'present', ['kdc_vendor', 'kdc_ip']), ('kdc_vendor', 'microsoft', ['ad_server_ip', 'ad_server_name'])],
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

        # Options whose ZAPI element name is just the option name with
        # '_' replaced by '-'; others need explicit mapping below.
        self.simple_attributes = [
            'admin_server_ip',
            'admin_server_port',
            'clock_skew',
            'kdc_ip',
            'kdc_port',
            'kdc_vendor',
        ]

    def get_krbrealm(self, realm_name=None, vserver_name=None):
        '''
        Checks if Kerberos Realm config exists.

        :param realm_name: realm to look up; defaults to the 'realm' parameter.
        :param vserver_name: vserver to query; defaults to the 'vserver' parameter.
        :return:
            kerberos realm object if found
            None if not found
        :rtype: object/None
        '''
        # Make query
        krbrealm_info = netapp_utils.zapi.NaElement('kerberos-realm-get-iter')

        if realm_name is None:
            realm_name = self.parameters['realm']

        if vserver_name is None:
            vserver_name = self.parameters['vserver']

        query_details = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm', **{'realm': realm_name, 'vserver-name': vserver_name})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        krbrealm_info.add_child_elem(query)

        result = self.server.invoke_successfully(krbrealm_info, enable_tunneling=True)

        # Get Kerberos Realm details
        krbrealm_details = None
        if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
            attributes_list = result.get_child_by_name('attributes-list')
            config_info = attributes_list.get_child_by_name('kerberos-realm')

            # Map ZAPI element names back to playbook option names.
            krbrealm_details = {
                'admin_server_ip': config_info.get_child_content('admin-server-ip'),
                'admin_server_port': config_info.get_child_content('admin-server-port'),
                'clock_skew': config_info.get_child_content('clock-skew'),
                'kdc_ip': config_info.get_child_content('kdc-ip'),
                'kdc_port': config_info.get_child_content('kdc-port'),
                'kdc_vendor': config_info.get_child_content('kdc-vendor'),
                'pw_server_ip': config_info.get_child_content('password-server-ip'),
                'pw_server_port': config_info.get_child_content('password-server-port'),
                'realm': config_info.get_child_content('realm'),
                'vserver': config_info.get_child_content('vserver-name'),
                'ad_server_ip': config_info.get_child_content('ad-server-ip'),
                'ad_server_name': config_info.get_child_content('ad-server-name')
            }

        return krbrealm_details

    def create_krbrealm(self):
        '''supported
        Create Kerberos Realm configuration
        '''
        options = {
            'realm': self.parameters['realm']
        }

        # Other options/attributes
        for attribute in self.simple_attributes:
            if self.parameters.get(attribute) is not None:
                options[str(attribute).replace('_', '-')] = self.parameters[attribute]

        # pw_server_* and ad_server_* do not follow the simple '_'->'-' rule.
        if self.parameters.get('pw_server_ip') is not None:
            options['password-server-ip'] = self.parameters['pw_server_ip']
        if self.parameters.get('pw_server_port') is not None:
            options['password-server-port'] = self.parameters['pw_server_port']

        if self.parameters.get('ad_server_ip') is not None:
            options['ad-server-ip'] = self.parameters['ad_server_ip']
        if self.parameters.get('ad_server_name') is not None:
            options['ad-server-name'] = self.parameters['ad_server_name']

        # Initialize NaElement
        krbrealm_create = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-create', **options)

        # Try to create Kerberos Realm configuration
        try:
            self.server.invoke_successfully(krbrealm_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error creating Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(errcatch)),
                                  exception=traceback.format_exc())

    def delete_krbrealm(self):
        '''
        Delete Kerberos Realm configuration
        '''
        krbrealm_delete = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-delete', **{'realm': self.parameters['realm']})

        try:
            self.server.invoke_successfully(krbrealm_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error deleting Kerberos Realm configuration %s: %s' % (
                self.parameters['realm'], to_native(errcatch)), exception=traceback.format_exc())

    def modify_krbrealm(self, modify):
        '''
        Modify Kerberos Realm
        :param modify: list of modify attributes
        '''
        krbrealm_modify = netapp_utils.zapi.NaElement('kerberos-realm-modify')
        krbrealm_modify.add_new_child('realm', self.parameters['realm'])

        for attribute in modify:
            if attribute in self.simple_attributes:
                krbrealm_modify.add_new_child(str(attribute).replace('_', '-'), self.parameters[attribute])
            # attributes whose ZAPI names differ from the playbook names
            if attribute == 'pw_server_ip':
                krbrealm_modify.add_new_child('password-server-ip', self.parameters['pw_server_ip'])
            if attribute == 'pw_server_port':
                krbrealm_modify.add_new_child('password-server-port', self.parameters['pw_server_port'])
            if attribute == 'ad_server_ip':
                krbrealm_modify.add_new_child('ad-server-ip', self.parameters['ad_server_ip'])
            if attribute == 'ad_server_name':
                krbrealm_modify.add_new_child('ad-server-name', self.parameters['ad_server_name'])

        # Try to modify Kerberos Realm
        try:
            self.server.invoke_successfully(krbrealm_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error modifying Kerberos Realm %s: %s' % (self.parameters['realm'], to_native(errcatch)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Call create/modify/delete operations.'''
        current = self.get_krbrealm()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # create an ems log event for users with auto support turned on
        netapp_utils.ems_log_event("na_ontap_kerberos_realm", self.server)

        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.create_krbrealm()
                elif cd_action == 'delete':
                    self.delete_krbrealm()
                elif modify:
                    self.modify_krbrealm(modify)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
+#
+# MAIN
+#
def main():
    '''ONTAP Kerberos Realm module entry point.'''
    NetAppOntapKerberosRealm().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py
new file mode 100644
index 00000000..be34cac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+'''
+(c) 2018-2019, NetApp, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_ontap_ldap
+
+short_description: NetApp ONTAP LDAP
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
+
+description:
+- Create, modify or delete LDAP on NetApp ONTAP SVM/vserver
+
+options:
+
+ state:
+ description:
+ - Whether the LDAP is present or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm configured to use LDAP
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of LDAP client configuration
+ required: true
+ type: str
+
+ skip_config_validation:
+ description:
+ - Skip LDAP validation
+ choices: ['true', 'false']
+ type: str
+'''
+
+EXAMPLES = '''
+
+ - name: Enable LDAP on SVM
+ na_ontap_ldap:
+ state: present
+ name: 'example_ldap'
+ vserver: 'vserver1'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapLDAP(object):
    '''
    LDAP Client definition class

    Enables, modifies or removes an LDAP configuration on a vserver
    through ZAPI calls.
    '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            skip_config_validation=dict(required=False, default=None, choices=['true', 'false']),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_ldap(self, client_config_name=None):
        '''
        Checks if LDAP config exists.

        :param client_config_name: config to look up; defaults to the 'name' parameter.
        :return:
            ldap config object if found
            None if not found
        :rtype: object/None
        '''
        # Make query
        config_info = netapp_utils.zapi.NaElement('ldap-config-get-iter')

        if client_config_name is None:
            client_config_name = self.parameters['name']

        query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config', **{'client-config': client_config_name})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        config_info.add_child_elem(query)

        result = self.server.invoke_successfully(config_info, enable_tunneling=True)

        # Get LDAP configuration details
        config_details = None
        if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
            attributes_list = result.get_child_by_name('attributes-list')
            config_info = attributes_list.get_child_by_name('ldap-config')

            # Define config details structure
            config_details = {'client_config': config_info.get_child_content('client-config'),
                              'skip_config_validation': config_info.get_child_content('skip-config-validation'),
                              'vserver': config_info.get_child_content('vserver')}

        return config_details

    def create_ldap(self):
        '''
        Create LDAP configuration
        '''
        options = {
            'client-config': self.parameters['name'],
            'client-enabled': 'true'
        }

        if self.parameters.get('skip_config_validation') is not None:
            options['skip-config-validation'] = self.parameters['skip_config_validation']

        # Initialize NaElement
        ldap_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-create', **options)

        # Try to create LDAP configuration
        try:
            self.server.invoke_successfully(ldap_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error creating LDAP configuration %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())

    def delete_ldap(self):
        '''
        Delete LDAP configuration

        The delete ZAPI takes no arguments; it removes the vserver's LDAP config.
        '''
        ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-delete', **{})

        try:
            self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error deleting LDAP configuration %s: %s' % (
                self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())

    def modify_ldap(self, modify):
        '''
        Modify LDAP
        :param modify: list of modify attributes
        '''
        ldap_modify = netapp_utils.zapi.NaElement('ldap-config-modify')
        ldap_modify.add_new_child('client-config', self.parameters['name'])

        # skip_config_validation is the only modifiable attribute here.
        for attribute in modify:
            if attribute == 'skip_config_validation':
                ldap_modify.add_new_child('skip-config-validation', self.parameters[attribute])

        # Try to modify LDAP
        try:
            self.server.invoke_successfully(ldap_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error modifying LDAP %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Call create/modify/delete operations.'''
        current = self.get_ldap()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # create an ems log event for users with auto support turned on
        netapp_utils.ems_log_event("na_ontap_ldap", self.server)

        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.create_ldap()
                elif cd_action == 'delete':
                    self.delete_ldap()
                elif modify:
                    self.modify_ldap(modify)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
+#
+# MAIN
+#
def main():
    '''ONTAP LDAP client configuration module entry point.'''
    NetAppOntapLDAP().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py
new file mode 100644
index 00000000..c0133863
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python
+'''
+(c) 2018-2019, NetApp, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_ontap_ldap_client
+
+short_description: NetApp ONTAP LDAP client
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
+
+description:
+- Create, modify or delete LDAP client on NetApp ONTAP
+
+options:
+
+ state:
+ description:
+ - Whether the specified LDAP client configuration exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm that holds LDAP client configuration
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of LDAP client configuration
+ required: true
+ type: str
+
+ ldap_servers:
+ description:
+    - Comma separated list of LDAP servers. FQDNs or IP addresses.
+ - Required if I(state=present).
+ type: list
+ elements: str
+
+ schema:
+ description:
+ - LDAP schema
+ - Required if I(state=present).
+ choices: ['AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307']
+ type: str
+
+ ad_domain:
+ description:
+ - Active Directory Domain Name
+ type: str
+
+ base_dn:
+ description:
+ - LDAP base DN
+ type: str
+
+ base_scope:
+ description:
+ - LDAP search scope
+ choices: ['subtree', 'onelevel', 'base']
+ type: str
+
+ bind_as_cifs_server:
+ description:
+ - The cluster uses the CIFS server's credentials to bind to the LDAP server.
+ type: bool
+
+ preferred_ad_servers:
+ description:
+ - Preferred Active Directory (AD) Domain Controllers
+ type: list
+ elements: str
+
+ port:
+ description:
+ - LDAP server port
+ type: int
+
+ query_timeout:
+ description:
+ - LDAP server query timeout
+ type: int
+
+ min_bind_level:
+ description:
+ - Minimal LDAP server bind level.
+ choices: ['anonymous', 'simple', 'sasl']
+ type: str
+
+ bind_dn:
+ description:
+ - LDAP bind user DN
+ type: str
+
+ bind_password:
+ description:
+ - LDAP bind user password
+ type: str
+
+ use_start_tls:
+ description:
+ - Start TLS on LDAP connection
+ type: bool
+
+ referral_enabled:
+ description:
+ - LDAP Referral Chasing
+ type: bool
+
+ session_security:
+ description:
+ - Client Session Security
+ choices: ['none', 'sign', 'seal']
+ type: str
+'''
+
+EXAMPLES = '''
+
+ - name: Create LDAP client
+ na_ontap_ldap_client:
+ state: present
+ name: 'example_ldap'
+ vserver: 'vserver1'
+ ldap_servers: 'ldap1.example.company.com,ldap2.example.company.com'
+ base_dn: 'dc=example,dc=company,dc=com'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapLDAPClient(object):
    '''
    LDAP Client definition class

    Creates, modifies or deletes an LDAP client configuration on an ONTAP
    vserver using ZAPI calls (ldap-client-get-iter/create/modify/delete).
    '''

    def __init__(self):
        # Standard ONTAP connection options plus this module's own options.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            ad_domain=dict(required=False, default=None, type='str'),
            base_dn=dict(required=False, type='str'),
            base_scope=dict(required=False, default=None, choices=['subtree', 'onelevel', 'base']),
            bind_as_cifs_server=dict(required=False, type='bool'),
            bind_dn=dict(required=False, default=None, type='str'),
            bind_password=dict(type='str', required=False, default=None, no_log=True),
            name=dict(required=True, type='str'),
            ldap_servers=dict(required=False, type='list', elements='str'),
            min_bind_level=dict(required=False, default=None, choices=['anonymous', 'simple', 'sasl']),
            preferred_ad_servers=dict(required=False, type='list', elements='str'),
            port=dict(required=False, default=None, type='int'),
            query_timeout=dict(required=False, default=None, type='int'),
            referral_enabled=dict(required=False, type='bool'),
            schema=dict(required=False, default=None, choices=['AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307']),
            session_security=dict(required=False, default=None, choices=['none', 'sign', 'seal']),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            use_start_tls=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # schema is mandatory when creating/keeping a configuration.
            required_if=[
                ('state', 'present', ['schema']),
            ],
            # ldap_servers (generic LDAP) and ad_domain/preferred_ad_servers
            # (Active Directory) are alternative ways to locate servers.
            mutually_exclusive=[
                ['ldap_servers', 'ad_domain'],
                ['ldap_servers', 'preferred_ad_servers']
            ],
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            # ZAPI connection scoped to the target vserver.
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

        # Scalar options that map 1:1 to ZAPI fields; underscores in the
        # option name become dashes in the ZAPI element name.
        self.simple_attributes = [
            'ad_domain',
            'base_dn',
            'base_scope',
            'bind_as_cifs_server',
            'bind_dn',
            'bind_password',
            'min_bind_level',
            'port',
            'query_timeout',
            'referral_enabled',
            'session_security',
            'use_start_tls'
        ]

    def get_ldap_client(self, client_config_name=None, vserver_name=None):
        '''
        Checks if LDAP client config exists.

        :param client_config_name: configuration name to look up; defaults to self.parameters['name']
        :param vserver_name: vserver to search; defaults to '*' (any vserver)
        :return:
            ldap client config object if found
            None if not found
        :rtype: object/None
        '''
        # Make query
        client_config_info = netapp_utils.zapi.NaElement('ldap-client-get-iter')

        if client_config_name is None:
            client_config_name = self.parameters['name']

        if vserver_name is None:
            vserver_name = '*'

        query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client',
                                                                              **{
                                                                                  'ldap-client-config': client_config_name,
                                                                                  'vserver': vserver_name})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        client_config_info.add_child_elem(query)

        result = self.server.invoke_successfully(client_config_info, enable_tunneling=False)

        # Get LDAP client configuration details
        client_config_details = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            # NOTE(review): only the first matching record is inspected even if
            # the iterator returned more than one.
            attributes_list = result.get_child_by_name('attributes-list')
            client_config_info = attributes_list.get_child_by_name('ldap-client')
            # Get LDAP servers list
            ldap_server_list = list()
            get_list = client_config_info.get_child_by_name('ldap-servers')
            if get_list is not None:
                ldap_server_list = [x.get_content() for x in get_list.get_children()]

            preferred_ad_servers_list = list()
            get_pref_ad_server_list = client_config_info.get_child_by_name('preferred-ad-servers')
            if get_pref_ad_server_list is not None:
                preferred_ad_servers_list = [x.get_content() for x in get_pref_ad_server_list.get_children()]

            # Define config details structure, keyed by module option names so
            # it can be compared directly against self.parameters.
            client_config_details = {
                'name': client_config_info.get_child_content('ldap-client-config'),
                'ldap_servers': ldap_server_list,
                'ad_domain': client_config_info.get_child_content('ad-domain'),
                'base_dn': client_config_info.get_child_content('base-dn'),
                'base_scope': client_config_info.get_child_content('base-scope'),
                'bind_as_cifs_server': self.na_helper.get_value_for_bool(from_zapi=True,
                                                                         value=client_config_info.get_child_content('bind-as-cifs-server')),
                'bind_dn': client_config_info.get_child_content('bind-dn'),
                'bind_password': client_config_info.get_child_content('bind-password'),
                'min_bind_level': client_config_info.get_child_content('min-bind-level'),
                'port': self.na_helper.get_value_for_int(from_zapi=True, value=client_config_info.get_child_content('port')),
                'preferred_ad_servers': preferred_ad_servers_list,
                'query_timeout': self.na_helper.get_value_for_int(from_zapi=True,
                                                                  value=client_config_info.get_child_content('query-timeout')),
                'referral_enabled': self.na_helper.get_value_for_bool(from_zapi=True,
                                                                      value=client_config_info.get_child_content('referral-enabled')),
                'schema': client_config_info.get_child_content('schema'),
                'session_security': client_config_info.get_child_content('session-security'),
                'use_start_tls': self.na_helper.get_value_for_bool(from_zapi=True,
                                                                   value=client_config_info.get_child_content('use-start-tls'))
            }
        return client_config_details

    def create_ldap_client(self):
        '''
        Create LDAP client configuration

        Builds an ldap-client-create request from self.parameters and invokes
        it; calls fail_json on API error.
        '''

        options = {
            'ldap-client-config': self.parameters['name'],
            'schema': self.parameters['schema'],
        }

        # Other options/attributes
        for attribute in self.simple_attributes:
            if self.parameters.get(attribute) is not None:
                options[str(attribute).replace('_', '-')] = str(self.parameters[attribute])

        # Initialize NaElement
        ldap_client_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client-create', **options)

        # LDAP servers NaElement
        if self.parameters.get('ldap_servers') is not None:
            ldap_servers_element = netapp_utils.zapi.NaElement('ldap-servers')
            for ldap_server_name in self.parameters['ldap_servers']:
                ldap_servers_element.add_new_child('string', ldap_server_name)
            ldap_client_create.add_child_elem(ldap_servers_element)

        # preferred_ad_servers
        if self.parameters.get('preferred_ad_servers') is not None:
            preferred_ad_servers_element = netapp_utils.zapi.NaElement('preferred-ad-servers')
            for pref_ad_server in self.parameters['preferred_ad_servers']:
                preferred_ad_servers_element.add_new_child('ip-address', pref_ad_server)
            ldap_client_create.add_child_elem(preferred_ad_servers_element)

        # Try to create LDAP configuration
        try:
            self.server.invoke_successfully(ldap_client_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(
                msg='Error creating LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
                exception=traceback.format_exc())

    def delete_ldap_client(self):
        '''
        Delete LDAP client configuration

        Invokes ldap-client-delete for self.parameters['name']; calls
        fail_json on API error.
        '''
        ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'ldap-client-delete', **{'ldap-client-config': self.parameters['name']})

        try:
            self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error deleting LDAP client configuration %s: %s' % (
                self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())

    def modify_ldap_client(self, modify):
        '''
        Modify LDAP client
        :param modify: list of modify attributes (keys from get_modified_attributes)
        '''
        ldap_client_modify = netapp_utils.zapi.NaElement('ldap-client-modify')
        ldap_client_modify.add_new_child('ldap-client-config', self.parameters['name'])

        for attribute in modify:
            # LDAP_servers
            if attribute == 'ldap_servers':
                ldap_servers_element = netapp_utils.zapi.NaElement('ldap-servers')
                for ldap_server_name in self.parameters['ldap_servers']:
                    ldap_servers_element.add_new_child('string', ldap_server_name)
                ldap_client_modify.add_child_elem(ldap_servers_element)
            # preferred_ad_servers
            if attribute == 'preferred_ad_servers':
                preferred_ad_servers_element = netapp_utils.zapi.NaElement('preferred-ad-servers')
                ldap_client_modify.add_child_elem(preferred_ad_servers_element)
                for pref_ad_server in self.parameters['preferred_ad_servers']:
                    preferred_ad_servers_element.add_new_child('ip-address', pref_ad_server)
            # Simple attributes
            if attribute in self.simple_attributes:
                ldap_client_modify.add_new_child(str(attribute).replace('_', '-'), str(self.parameters[attribute]))

        # Try to modify LDAP client
        try:
            self.server.invoke_successfully(ldap_client_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(
                msg='Error modifying LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
                exception=traceback.format_exc())

    def apply(self):
        '''Call create/modify/delete operations.'''
        current = self.get_ldap_client()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)

        # state is present, either ldap_servers or ad_domain is required
        if self.parameters['state'] == 'present' and not self.parameters.get('ldap_servers') \
                and self.parameters.get('ad_domain') is None:
            self.module.fail_json(msg='Required one of ldap_servers or ad_domain')

        # modify is only bound when cd_action is None, which is the only path
        # that can reach the 'elif modify' branch below.
        if self.parameters['state'] == 'present' and cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        # create an ems log event for users with auto support turned on
        netapp_utils.ems_log_event("na_ontap_ldap_client", self.server)

        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.create_ldap_client()
                elif cd_action == 'delete':
                    self.delete_ldap_client()
                elif modify:
                    self.modify_ldap_client(modify)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
#
# MAIN
#
def main():
    '''ONTAP LDAP client configuration'''
    NetAppOntapLDAPClient().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py
new file mode 100644
index 00000000..9adee9bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py
@@ -0,0 +1,333 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_license
+
+short_description: NetApp ONTAP protocol and feature licenses
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Add or remove licenses on NetApp ONTAP.
+
+options:
+ state:
+ description:
+ - Whether the specified license should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ remove_unused:
+ description:
+ - Remove licenses that have no controller affiliation in the cluster.
+ type: bool
+
+ remove_expired:
+ description:
+ - Remove licenses that have expired in the cluster.
+ type: bool
+
+ serial_number:
+ description:
+ Serial number of the node associated with the license.
+ This parameter is used primarily when removing license for a specific service.
+ type: str
+
+ license_names:
+ type: list
+ elements: str
+ description:
+ - List of license-names to delete.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+ license_codes:
+ description:
+ - List of license codes to be added.
+ type: list
+ elements: str
+
+'''
+
+
+EXAMPLES = """
+- name: Add licenses
+ na_ontap_license:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ serial_number: #################
+ license_codes: CODE1,CODE2
+
+- name: Remove licenses
+ na_ontap_license:
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ license_names: nfs,cifs
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
def local_cmp(a, b):
    """
    Count how many values differ between two dicts; keys should be the same for both dicts.
    :param a: dict 1
    :param b: dict 2
    :return: number of keys whose values differ between the two dicts
    """
    return sum(1 for key in a if a[key] != b[key])
+
+
class NetAppOntapLicense(object):
    '''ONTAP license class

    Adds license codes, and removes licenses by package name or by the
    unused/expired cleanup calls, using the license-v2 ZAPI family.
    '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            serial_number=dict(required=False, type='str'),
            remove_unused=dict(default=None, type='bool'),
            remove_expired=dict(default=None, type='bool'),
            license_codes=dict(default=None, type='list', elements='str'),
            license_names=dict(default=None, type='list', elements='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # check mode is not supported: license add/remove results can only
            # be detected by attempting the operation.
            supports_check_mode=False,
            required_if=[
                ('state', 'absent', ['serial_number', 'license_names'])]
        )
        parameters = self.module.params
        # set up state variables
        self.state = parameters['state']
        self.serial_number = parameters['serial_number']
        self.remove_unused = parameters['remove_unused']
        self.remove_expired = parameters['remove_expired']
        self.license_codes = parameters['license_codes']
        self.license_names = parameters['license_names']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            # cluster-scoped ZAPI connection (no vserver tunneling).
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_licensing_status(self):
        """
        Check licensing status

        :return: package (key) and licensing status (value)
        :rtype: dict
        """
        license_status = netapp_utils.zapi.NaElement(
            'license-v2-status-list-info')
        result = None
        try:
            result = self.server.invoke_successfully(license_status,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error checking license status: %s" %
                                  to_native(error), exception=traceback.format_exc())

        return_dictionary = {}
        license_v2_status = result.get_child_by_name('license-v2-status')
        if license_v2_status:
            for license_v2_status_info in license_v2_status.get_children():
                package = license_v2_status_info.get_child_content('package')
                status = license_v2_status_info.get_child_content('method')
                return_dictionary[package] = status

        return return_dictionary

    def remove_licenses(self, package_name):
        """
        Remove requested licenses
        :param:
            package_name: Name of the license to be deleted
        :return: True if the license was deleted, False if it did not exist
        :rtype: bool
        """
        license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
        license_delete.add_new_child('serial-number', self.serial_number)
        license_delete.add_new_child('package', package_name)
        try:
            self.server.invoke_successfully(license_delete,
                                            enable_tunneling=False)
            return True
        except netapp_utils.zapi.NaApiError as error:
            # Error 15661 - Object not found
            if to_native(error.code) == "15661":
                # license was already absent: not an error, just no change.
                return False
            else:
                self.module.fail_json(msg="Error removing license %s" %
                                      to_native(error), exception=traceback.format_exc())

    def remove_unused_licenses(self):
        """
        Remove unused licenses (licenses with no controller affiliation).
        """
        remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
        try:
            self.server.invoke_successfully(remove_unused,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error removing unused licenses: %s" %
                                  to_native(error), exception=traceback.format_exc())

    def remove_expired_licenses(self):
        """
        Remove expired licenses
        """
        remove_expired = netapp_utils.zapi.NaElement(
            'license-v2-delete-expired')
        try:
            self.server.invoke_successfully(remove_expired,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error removing expired licenses: %s" %
                                  to_native(error), exception=traceback.format_exc())

    def add_licenses(self):
        """
        Add licenses from self.license_codes (codes are lower-cased and
        stripped before being sent).
        """
        license_add = netapp_utils.zapi.NaElement('license-v2-add')
        codes = netapp_utils.zapi.NaElement('codes')
        for code in self.license_codes:
            codes.add_new_child('license-code-v2', str(code.strip().lower()))
        license_add.add_child_elem(codes)
        try:
            self.server.invoke_successfully(license_add,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error adding licenses: %s" %
                                  to_native(error), exception=traceback.format_exc())

    def apply(self):
        '''Call add, delete or modify methods'''
        changed = False
        create_license = False
        remove_license = False
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(
            module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_license", cserver)
        # Add / Update licenses.
        # Snapshot the current status so we can compare after the operations.
        license_status = self.get_licensing_status()

        if self.state == 'absent':  # delete
            changed = True
        else:  # add or update
            if self.license_codes is not None:
                create_license = True
                changed = True
            if self.remove_unused is not None:
                remove_license = True
                changed = True
            if self.remove_expired is not None:
                remove_license = True
                changed = True
        # check_mode is always False here (supports_check_mode=False above).
        if changed and not self.module.check_mode:
            if self.state == 'present':  # execute create
                if create_license:
                    self.add_licenses()
                if self.remove_unused is not None:
                    self.remove_unused_licenses()
                if self.remove_expired is not None:
                    self.remove_expired_licenses()
                # not able to detect that a new license is required until we try to install it.
                if create_license or remove_license:
                    new_license_status = self.get_licensing_status()
                    if local_cmp(license_status, new_license_status) == 0:
                        changed = False
            else:  # execute delete
                license_deleted = False
                # not able to detect which license is required to delete until we try it.
                for package in self.license_names:
                    license_deleted |= self.remove_licenses(package)
                changed = license_deleted

        self.module.exit_json(changed=changed)
+
+
def main():
    '''Apply license operations'''
    NetAppOntapLicense().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py
new file mode 100644
index 00000000..d68030fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_login_messages
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_login_messages
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.1.0'
+short_description: Setup login banner and message of the day
+description:
+ - This module allows you to manipulate login banner and motd for a vserver
+options:
+ banner:
+ description:
+ - Login banner Text message.
+ type: str
+ vserver:
+ description:
+ - The name of the SVM login messages should be set for.
+ required: true
+ type: str
+ motd_message:
+ description:
+ - MOTD Text message.
+ type: str
+ aliases:
+ - message
+ show_cluster_motd:
+ description:
+ - Set to I(false) if Cluster-level Message of the Day should not be shown
+ type: bool
+ default: True
+'''
+
+EXAMPLES = """
+
+ - name: modify banner vserver
+ na_ontap_login_messages:
+ vserver: trident_svm
+ banner: this is trident vserver
+ usename: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+ - name: modify motd vserver
+ na_ontap_login_messages:
+ vserver: trident_svm
+ motd_message: this is trident vserver
+ show_cluster_motd: True
+ usename: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapLoginMessages(object):
    """
    modify and delete login banner and motd

    Uses the REST API (security/login/messages) when available, and falls
    back to ZAPI (vserver-login-banner-*, vserver-motd-*) otherwise.
    """

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str'),
            banner=dict(required=False, type='str'),
            motd_message=dict(required=False, type='str', aliases=['message']),
            show_cluster_motd=dict(default=True, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_one_of=[['show_cluster_motd', 'banner', 'motd_message']]
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Prefer REST; only set up a ZAPI connection when REST is unavailable.
        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_banner_motd(self, uuid=None):
        """
        Fetch the current banner/motd settings for the vserver.

        :param uuid: SVM uuid (REST only; ignored for ZAPI)
        :return: dict keyed by module option names (banner, motd_message,
                 show_cluster_motd) so it can be compared with self.parameters
        """
        if self.use_rest:
            api = 'security/login/messages/' + uuid
            params = {
                'fields': '*'
            }
            message, error = self.rest_api.get(api, params)
            if error:
                self.module.fail_json(msg='Error when fetching login_banner info: %s' % error)
            return_result = dict()
            return_result['banner'] = message['banner'].rstrip() if message.get('banner') else ''
            return_result['motd_message'] = message['message'].rstrip() if message.get('message') else ''
            # Map the REST field 'show_cluster_message' to the module option
            # name 'show_cluster_motd' so get_modified_attributes() can compare
            # it against self.parameters (the ZAPI branch below uses the same
            # key).  Use an explicit None check so a value of False is kept.
            if message.get('show_cluster_message') is not None:
                return_result['show_cluster_motd'] = message['show_cluster_message']
            return return_result
        else:
            # ZAPI: banner and motd live in two separate iterators.
            login_banner_get_iter = netapp_utils.zapi.NaElement('vserver-login-banner-get-iter')
            query = netapp_utils.zapi.NaElement('query')
            login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info')
            login_banner_info.add_new_child('vserver', self.parameters['vserver'])
            query.add_child_elem(login_banner_info)
            login_banner_get_iter.add_child_elem(query)
            return_result = dict()
            try:
                result = self.server.invoke_successfully(login_banner_get_iter, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching login_banner info: %s' % to_native(error),
                                      exception=traceback.format_exc())
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
                login_banner_info = result.get_child_by_name('attributes-list').get_child_by_name(
                    'vserver-login-banner-info')
                return_result['banner'] = login_banner_info.get_child_content('message')
                return_result['banner'] = str(return_result['banner']).rstrip()
                # if the message is '-' that means the banner doesn't exist.
                if return_result['banner'] == '-' or return_result['banner'] == 'None':
                    return_result['banner'] = ''

            motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter')
            query = netapp_utils.zapi.NaElement('query')
            motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
            motd_info.add_new_child('vserver', self.parameters['vserver'])
            query.add_child_elem(motd_info)
            motd_get_iter.add_child_elem(query)
            try:
                result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error),
                                      exception=traceback.format_exc())
            if result.get_child_by_name('num-records') and \
                    int(result.get_child_content('num-records')) > 0:
                motd_info = result.get_child_by_name('attributes-list').get_child_by_name(
                    'vserver-motd-info')
                return_result['motd_message'] = motd_info.get_child_content('message')
                return_result['motd_message'] = str(return_result['motd_message']).rstrip()
                return_result['show_cluster_motd'] = True if motd_info.get_child_content(
                    'is-cluster-message-enabled') == 'true' else False
                if return_result['motd_message'] == 'None':
                    return_result['motd_message'] = ''
            return return_result

    def modify_banner(self, modify, uuid):
        """
        Set the login banner.

        :param modify: dict of changed attributes; must contain 'banner'
        :param uuid: SVM uuid (REST only)
        """
        if self.use_rest:
            api = 'security/login/messages/' + uuid
            params = {
                "banner": modify['banner']
            }
            dummy, error = self.rest_api.patch(api, params)
            if error:
                self.module.fail_json(msg='Error when modifying banner: %s' % error)
        else:
            login_banner_modify = netapp_utils.zapi.NaElement('vserver-login-banner-modify-iter')
            login_banner_modify.add_new_child('message', modify['banner'])
            query = netapp_utils.zapi.NaElement('query')
            login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info')
            login_banner_info.add_new_child('vserver', self.parameters['vserver'])
            query.add_child_elem(login_banner_info)
            login_banner_modify.add_child_elem(query)
            try:
                self.server.invoke_successfully(login_banner_modify, enable_tunneling=False)
            except netapp_utils.zapi.NaApiError as err:
                self.module.fail_json(msg="Error modifying login_banner: %s" % (to_native(err)),
                                      exception=traceback.format_exc())

    def modify_motd(self, modify, uuid):
        """
        Set the message of the day and/or the show-cluster-motd flag.

        :param modify: dict of changed attributes; may contain 'motd_message'
                       and/or 'show_cluster_motd'
        :param uuid: SVM uuid (REST only)
        """
        if self.use_rest:
            api = 'security/login/messages/' + uuid
            # Build the payload from the attributes that actually changed;
            # only show_cluster_motd may have changed, so do not assume
            # motd_message is present (it would raise a KeyError).
            params = {}
            if modify.get('motd_message') is not None:
                params['message'] = modify['motd_message']
            # None check, not truthiness: False must still be sent.
            if modify.get('show_cluster_motd') is not None:
                params['show_cluster_message'] = modify['show_cluster_motd']
            dummy, error = self.rest_api.patch(api, params)
            if error:
                self.module.fail_json(msg='Error when modifying motd: %s' % error)
        else:
            motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter')
            if modify.get('motd_message') is not None:
                motd_create.add_new_child('message', modify['motd_message'])
            if modify.get('show_cluster_motd') is not None:
                motd_create.add_new_child('is-cluster-message-enabled', 'true' if modify['show_cluster_motd'] is True else 'false')
            query = netapp_utils.zapi.NaElement('query')
            motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
            motd_info.add_new_child('vserver', self.parameters['vserver'])
            query.add_child_elem(motd_info)
            motd_create.add_child_elem(query)
            try:
                self.server.invoke_successfully(motd_create, enable_tunneling=False)
            except netapp_utils.zapi.NaApiError as err:
                self.module.fail_json(msg="Error modifying motd: %s" % (to_native(err)),
                                      exception=traceback.format_exc())

    def get_svm_uuid(self):
        """
        Get a svm's uuid
        :return: uuid of the svm
        """
        params = {'name': self.parameters['vserver'],
                  'fields': 'uuid'
                  }
        api = 'svm/svms'
        message, error = self.rest_api.get(api, params)
        if error is not None:
            self.module.fail_json(msg="%s" % error)
        if message['num_records'] == 0:
            self.module.fail_json(msg="Error fetching specified vserver. Please make sure vserver name is correct. For cluster vserver, Please use ZAPI.")
        return message['records'][0]['uuid']

    def apply(self):
        """Compare current settings with requested ones and apply changes."""
        uuid = None
        modify = None
        if self.use_rest:
            uuid = self.get_svm_uuid()
        else:
            # create an ems log event for users with auto support turned on
            netapp_utils.ems_log_event("na_ontap_login_banner", self.server)

        current = self.get_banner_motd(uuid=uuid)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if modify.get('banner') is not None:
                    self.modify_banner(modify, uuid=uuid)
                if modify.get('show_cluster_motd') is not None or modify.get('motd_message') is not None:
                    self.modify_motd(modify, uuid=uuid)

        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    '''Execute action from playbook'''
    NetAppOntapLoginMessages().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py
new file mode 100644
index 00000000..8f9b5abb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py
@@ -0,0 +1,757 @@
+#!/usr/bin/python
+
+# (c) 2017-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_lun
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_lun
+
+short_description: NetApp ONTAP manage LUNs
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, destroy, resize LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the LUN to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - The name of the LUN to be renamed.
+ type: str
+ version_added: 20.12.0
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the LUN should exist on.
+ - Required if san_application_template is not present.
+ - Not allowed if san_application_template is present.
+ type: str
+
+ size:
+ description:
+ - The size of the LUN in C(size_unit).
+ - Required when C(state=present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ force_resize:
+ description:
+ Forcibly reduce the size. This is required for reducing the size of the LUN to avoid accidentally
+ reducing the LUN size.
+ type: bool
+ default: false
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ type: bool
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
+ type: bool
+ default: false
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+ type: str
+
+ os_type:
+ description:
+ - The os type for the LUN.
+ type: str
+ aliases: ['ostype']
+
+ qos_policy_group:
+ description:
+ - The QoS policy group to be set on the LUN.
+ type: str
+ version_added: 20.12.0
+
+ space_reserve:
+ description:
+ - This can be set to "false" which will create a LUN without any space being reserved.
+ type: bool
+ default: True
+
+ space_allocation:
+ description:
+ - This enables support for the SCSI Thin Provisioning features. If the Host and file system do
+ not support this do not enable it.
+ type: bool
+ default: False
+ version_added: 2.7.0
+
+ use_exact_size:
+ description:
+ - This can be set to "False" which will round the LUN >= 450g.
+ type: bool
+ default: True
+ version_added: 20.11.0
+
+ san_application_template:
+ description:
+ - additional options when using the application/applications REST API to create LUNs.
+ - the module is using ZAPI by default, and switches to REST if any suboption is present.
+ - create one or more LUNs (and the associated volume as needed).
+ - only creation or deletion of a SAN application is supported. Changes are ignored.
+ - operations at the LUN level are supported, they require to know the LUN short name.
+ - this requires ONTAP 9.6 or higher.
+ type: dict
+ version_added: 20.12.0
+ suboptions:
+ name:
+ description: name of the SAN application.
+ type: str
+ required: True
+ igroup_name:
+ description: name of the initiator group through which the contents of this application will be accessed.
+ type: str
+ lun_count:
+ description: number of LUNs in the application component (1 to 32).
+ type: int
+ protection_type:
+ description:
+ - The snapshot policy for the volume supporting the LUNs.
+ type: dict
+ suboptions:
+ local_policy:
+ description:
+ - The snapshot copy policy for the volume.
+ type: str
+ storage_service:
+ description:
+ - The performance service level (PSL) for this volume
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy (see C(tiering_policy) for a more complete description).
+ type: dict
+ suboptions:
+ control:
+ description: Storage tiering placement rules for the container.
+ choices: ['required', 'best_effort', 'disallowed']
+ type: str
+ policy:
+ description:
+ - Cloud tiering policy (see C(tiering_policy)).
+ - Must match C(tiering_policy) if both are present.
+ choices: ['snapshot-only', 'auto', 'backup', 'none']
+ type: str
+ object_stores:
+ description: list of object store names for tiering.
+ type: list
+ elements: str
+ use_san_application:
+ description:
+ - Whether to use the application/applications REST/API to create LUNs.
+ - This will default to true if any other suboption is present.
+ type: bool
+ default: true
+
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ os_type: linux
+ space_reserve: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize LUN
+ na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: True
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Create LUNs using SAN application
+ tags: create
+ na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ size: 15
+ size_unit: mb
+ os_type: linux
+ space_reserve: false
+ san_application_template:
+ name: san-ansibleLUN
+ igroup_name: testme_igroup
+ lun_count: 3
+ protection_type:
+ local_policy: default
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUN(object):
+ ''' create, modify, delete LUN '''
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ force_resize=dict(default=False, type='bool'),
+ force_remove=dict(default=False, type='bool'),
+ force_remove_fenced=dict(default=False, type='bool'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ os_type=dict(required=False, type='str', aliases=['ostype']),
+ qos_policy_group=dict(required=False, type='str'),
+ space_reserve=dict(required=False, type='bool', default=True),
+ space_allocation=dict(required=False, type='bool', default=False),
+ use_exact_size=dict(required=False, type='bool', default=True),
+ san_application_template=dict(type='dict', options=dict(
+ use_san_application=dict(type='bool', default=True),
+ name=dict(required=True, type='str'),
+ igroup_name=dict(type='str'),
+ lun_count=dict(type='int'),
+ protection_type=dict(type='dict', options=dict(
+ local_policy=dict(type='str'),
+ )),
+ storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
+ tiering=dict(type='dict', options=dict(
+ control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
+ policy=dict(type='str', choices=['snapshot-only', 'auto', 'backup', 'none']),
+ object_stores=dict(type='list', elements='str') # create only
+ )),
+ ))
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up state variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('size') is not None:
+ self.parameters['size'] *= netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ # REST API for application/applications if needed
+ self.rest_api, self.rest_app = self.setup_rest_application()
+
+ def setup_rest_application(self):
+ use_application_template = self.na_helper.safe_get(self.parameters, ['san_application_template', 'use_san_application'])
+ rest_api, rest_app = None, None
+ if use_application_template:
+ if self.parameters.get('flexvol_name') is not None:
+ self.module.fail_json(msg="'flexvol_name' option is not supported when san_application_template is present")
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ name = self.na_helper.safe_get(self.parameters, ['san_application_template', 'name'], allow_sparse_dict=False)
+ rest_app = RestApplication(rest_api, self.parameters['vserver'], name)
+ elif self.parameters.get('flexvol_name') is None:
+ self.module.fail_json(msg="flexvol_name option is required when san_application_template is not present")
+ return rest_api, rest_app
+
+ def get_luns(self, lun_path=None):
+ """
+ Return list of LUNs matching vserver and volume names.
+
+ :return: list of LUNs in XML format.
+ :rtype: list
+ """
+ luns = []
+ tag = None
+ if lun_path is None and self.parameters.get('flexvol_name') is None:
+ return luns
+
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('vserver', self.parameters['vserver'])
+ if lun_path is not None:
+ query_details.add_new_child('lun_path', lun_path)
+ else:
+ query_details.add_new_child('volume', self.parameters['flexvol_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ while True:
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ lun_info.add_child_elem(query)
+ if tag:
+ lun_info.add_new_child('tag', tag, True)
+
+ result = self.server.invoke_successfully(lun_info, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attr_list = result.get_child_by_name('attributes-list')
+ luns.extend(attr_list.get_children())
+ tag = result.get_child_content('next-tag')
+ if tag is None:
+ break
+ return luns
+
+ def get_lun_details(self, lun):
+ """
+ Extract LUN details, from XML to python dict
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ return_value = dict()
+ return_value['size'] = int(lun.get_child_content('size'))
+ bool_attr_map = {
+ 'is-space-alloc-enabled': 'space_allocation',
+ 'is-space-reservation-enabled': 'space_reserve'
+ }
+ for attr in bool_attr_map:
+ value = lun.get_child_content(attr)
+ if value is not None:
+ return_value[bool_attr_map[attr]] = self.na_helper.get_value_for_bool(True, value)
+ str_attr_map = {
+ 'name': 'name',
+ 'path': 'path',
+ 'qos-policy-group': 'qos_policy_group',
+ 'multiprotocol-type': 'os_type'
+ }
+ for attr in str_attr_map:
+ value = lun.get_child_content(attr)
+ if value is not None:
+ return_value[str_attr_map[attr]] = value
+
+ # Find out if the lun is attached
+ attached_to = None
+ lun_id = None
+ if lun.get_child_content('mapped') == 'true':
+ lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-map-list-info', **{'path': lun.get_child_content('path')})
+ result = self.server.invoke_successfully(
+ lun_map_list, enable_tunneling=True)
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ igroup = igroup_info.get_child_content(
+ 'initiator-group-name')
+ attached_to = igroup
+ lun_id = igroup_info.get_child_content('lun-id')
+
+ return_value.update({
+ 'attached_to': attached_to,
+ 'lun_id': lun_id
+ })
+ return return_value
+
+ def find_lun(self, luns, name, lun_path=None):
+ """
+ Return lun record matching name or path
+
+ :return: lun record
+ :rtype: XML or None if not found
+ """
+ for lun in luns:
+ path = lun.get_child_content('path')
+ if lun_path is not None:
+ if lun_path == path:
+ return lun
+ else:
+ if name == path:
+ return lun
+ _rest, _splitter, found_name = path.rpartition('/')
+ if found_name == name:
+ return lun
+ return None
+
+ def get_lun(self, name, lun_path=None):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ luns = self.get_luns(lun_path)
+ lun = self.find_lun(luns, name, lun_path)
+ if lun is not None:
+ return self.get_lun_details(lun)
+ return None
+
+ def get_luns_from_app(self):
+ app_details, error = self.rest_app.get_application_details()
+ self.fail_on_error(error)
+ if app_details is not None:
+ app_details['paths'] = self.get_lun_paths_from_app()
+ return app_details
+
+ def get_lun_paths_from_app(self):
+ """Get luns path for SAN application"""
+ backing_storage, error = self.rest_app.get_application_component_backing_storage()
+ self.fail_on_error(error)
+ # {'luns': [{'path': '/vol/ansibleLUN/ansibleLUN_1', ...
+ if backing_storage is not None:
+ return [lun['path'] for lun in backing_storage.get('luns', [])]
+ return None
+
+ def get_lun_path_from_backend(self, name):
+ """returns lun path matching name if found in backing_storage
+ returns None if not found
+ """
+ lun_paths = self.get_lun_paths_from_app()
+ match = "/%s" % name
+ for path in lun_paths:
+ if path.endswith(match):
+ return path
+ return None
+
+ def create_san_app_component(self):
+ '''Create SAN application component'''
+ required_options = ('name', 'size')
+ for option in required_options:
+ if self.parameters.get(option) is None:
+ self.module.fail_json(msg='Error: "%s" is required to create san application.' % option)
+
+ application_component = dict(
+ name=self.parameters['name'],
+ total_size=self.parameters['size'],
+ lun_count=1 # default value, may be overridden below
+ )
+ for attr in ('igroup_name', 'lun_count', 'storage_service'):
+ value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if value is not None:
+ application_component[attr] = value
+ for attr in ('os_type', 'qos_policy_group'):
+ value = self.na_helper.safe_get(self.parameters, [attr])
+ if value is not None:
+ if attr == 'qos_policy_group':
+ attr = 'qos'
+ value = dict(policy=dict(name=value))
+ application_component[attr] = value
+ tiering = self.na_helper.safe_get(self.parameters, ['san_application_template', 'tiering'])
+ if tiering is not None:
+ application_component['tiering'] = dict()
+ for attr in ('control', 'policy', 'object_stores'):
+ value = tiering.get(attr)
+ if attr == 'object_stores' and value is not None:
+ value = [dict(name=x) for x in value]
+ if value is not None:
+ application_component['tiering'][attr] = value
+ return application_component
+
+ def create_san_app_body(self):
+ '''Create body for san template'''
+ # TODO:
+ # Should we support new_igroups?
+ # It may raise idempotency issues if the REST call fails if the igroup already exists.
+ # And we already have na_ontap_igroups.
+ san = {
+ 'application_components': [self.create_san_app_component()],
+ }
+ for attr in ('protection_type',):
+ value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if value is not None:
+ # we expect value to be a dict, but maybe an empty dict
+ value = self.na_helper.filter_out_none_entries(value)
+ if value:
+ san[attr] = value
+ for attr in ('os_type',):
+ value = self.na_helper.safe_get(self.parameters, [attr])
+ if value is not None:
+ san[attr] = value
+ body, error = self.rest_app.create_application_body('san', san)
+ return body, error
+
+ def create_san_application(self):
+ '''Use REST application/applications san template to create one or more LUNs'''
+ body, error = self.create_san_app_body()
+ self.fail_on_error(error)
+ dummy, error = self.rest_app.create_application(body)
+ self.fail_on_error(error)
+
+ def delete_san_application(self):
+ '''Use REST application/applications san template to delete one or more LUNs'''
+ dummy, error = self.rest_app.delete_application()
+ self.fail_on_error(error)
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
+ options = {'path': path,
+ 'size': str(self.parameters['size']),
+ 'space-reservation-enabled': str(self.parameters['space_reserve']),
+ 'space-allocation-enabled': str(self.parameters['space_allocation']),
+ 'use-exact-size': str(self.parameters['use_exact_size'])}
+ if self.parameters.get('os_type') is not None:
+ options['ostype'] = self.parameters['os_type']
+ if self.parameters.get('qos_policy_group') is not None:
+ options['qos-policy-group'] = self.parameters['qos_policy_group']
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **options)
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s"
+ % (self.parameters['name'], self.parameters['size'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self, path):
+ """
+ Delete requested LUN
+ """
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.parameters['force_remove']),
+ 'destroy-fenced-lun':
+ str(self.parameters['force_remove_fenced'])})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self, path):
+ """
+ Resize requested LUN.
+
+ :return: True if LUN was actually re-sized, false otherwise.
+ :rtype: bool
+ """
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.parameters['size']),
+ 'force': str(self.parameters['force_resize'])})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ if to_native(exc.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def set_lun_value(self, path, key, value):
+ key_to_zapi = dict(
+ qos_policy_group=('lun-set-qos-policy-group', 'qos-policy-group'),
+ space_allocation=('lun-set-space-alloc', 'enable'),
+ space_reserve=('lun-set-space-reservation-info', 'enable')
+ )
+ if key in key_to_zapi:
+ zapi, option = key_to_zapi[key]
+ else:
+ self.module.fail_json(msg="option %s cannot be modified to %s" % (key, value))
+ options = dict(path=path)
+ if option == 'enable':
+ options[option] = self.na_helper.get_value_for_bool(False, value)
+ else:
+ options[option] = value
+
+ lun_set = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(lun_set, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error setting lun option %s: %s" % (key, to_native(exc)),
+ exception=traceback.format_exc())
+ return
+
+ def modify_lun(self, path, modify):
+ """
+ update LUN properties (except size or name)
+ """
+ for key, value in modify.items():
+ self.set_lun_value(path, key, value)
+
+ def rename_lun(self, path, new_path):
+ """
+ rename LUN
+ """
+ lun_move = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-move', **{'path': path,
+ 'new-path': new_path})
+ try:
+ self.server.invoke_successfully(lun_move, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error moving lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def fail_on_error(self, error, stack=False):
+ if error is None:
+ return
+ elements = dict(msg="Error: %s" % error)
+ if stack:
+ elements['stack'] = traceback.format_stack()
+ self.module.fail_json(**elements)
+
+ def apply(self):
+ results = dict()
+ warnings = list()
+ netapp_utils.ems_log_event("na_ontap_lun", self.server)
+ app_cd_action = None
+ if self.rest_app:
+ app_current, error = self.rest_app.get_application_uuid()
+ self.fail_on_error(error)
+ app_cd_action = self.na_helper.get_cd_action(app_current, self.parameters)
+ if app_cd_action == 'create' and self.parameters.get('size') is None:
+ self.module.fail_json(msg="size is a required parameter for create.")
+
+ # For LUNs created using a SAN application, we're getting lun paths from the backing storage
+ lun_path, from_lun_path = None, None
+ from_name = self.parameters.get('from_name')
+ if self.rest_app and app_cd_action is None and app_current:
+ lun_path = self.get_lun_path_from_backend(self.parameters['name'])
+ if from_name is not None:
+ from_lun_path = self.get_lun_path_from_backend(from_name)
+
+ if app_cd_action is None:
+ # actions at LUN level
+ current = self.get_lun(self.parameters['name'], lun_path)
+ if current is not None and lun_path is None:
+ lun_path = current['path']
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify, rename = None, None
+ if cd_action == 'create' and from_name is not None:
+ # create by renaming existing LUN, if it really exists
+ old_lun = self.get_lun(from_name, from_lun_path)
+ rename = self.na_helper.is_rename_action(old_lun, current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming lun: %s does not exist" % from_name)
+ if rename:
+ current = old_lun
+ if from_lun_path is None:
+ from_lun_path = current['path']
+ head, _sep, tail = from_lun_path.rpartition(from_name)
+ if tail:
+ self.module.fail_json(msg="Error renaming lun: %s does not match lun_path %s" % (from_name, from_lun_path))
+ lun_path = head + self.parameters['name']
+ results['renamed'] = True
+ cd_action = None
+ if cd_action == 'create' and self.parameters.get('size') is None:
+ self.module.fail_json(msg="size is a required parameter for create.")
+ if cd_action is None and self.parameters['state'] == 'present':
+ # we already handled rename if required
+ current.pop('name', None)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ results['modify'] = dict(modify)
+ if cd_action and self.rest_app and app_cd_action is None and app_current:
+ msg = 'This module does not support %s a LUN by name %s a SAN application.' %\
+ (('adding', 'to') if cd_action == 'create' else ('removing', 'from'))
+ warnings.append(msg)
+ cd_action = None
+ self.na_helper.changed = False
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if app_cd_action == 'create':
+ self.create_san_application()
+ elif app_cd_action == 'delete':
+ self.rest_app.delete_application()
+ elif cd_action == 'create':
+ self.create_lun()
+ elif cd_action == 'delete':
+ self.delete_lun(lun_path)
+ else:
+ if rename:
+ self.rename_lun(from_lun_path, lun_path)
+ size_changed = False
+ if modify and 'size' in modify:
+ # Ensure that size was actually changed. Please
+ # read notes in 'resize_lun' function for details.
+ size_changed = self.resize_lun(lun_path)
+ modify.pop('size')
+ if modify:
+ self.modify_lun(lun_path, modify)
+ if not modify and not rename:
+ # size may not have changed
+ self.na_helper.changed = size_changed
+
+ results['changed'] = self.na_helper.changed
+ self.module.exit_json(**results)
+
+
+def main():
+ lun = NetAppOntapLUN()
+ lun.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py
new file mode 100644
index 00000000..e55b663c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_lun_copy
+
+short_description: NetApp ONTAP copy LUNs
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Copy LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+
+ destination_vserver:
+ description:
+ - the name of the Vserver that will host the new LUN.
+ required: true
+ type: str
+
+ destination_path:
+ description:
+ - Specifies the full path to the new LUN.
+ required: true
+ type: str
+
+ source_path:
+ description:
+ - Specifies the full path to the source LUN.
+ required: true
+ type: str
+
+ source_vserver:
+ description:
+ - Specifies the name of the vserver hosting the LUN to be copied.
+ type: str
+
+ '''
+EXAMPLES = """
+- name: Copy LUN
+ na_ontap_lun_copy:
+ destination_vserver: ansible
+ destination_path: /vol/test/test_copy_dest_dest_new
+ source_path: /vol/test/test_copy_1
+ source_vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUNCopy(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ destination_vserver=dict(required=True, type='str'),
+ destination_path=dict(required=True, type='str'),
+ source_path=dict(required=True, type='str'),
+ source_vserver=dict(required=False, type='str'),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['destination_vserver'])
+
+ def get_lun(self):
+ """
+ Check if the LUN exists
+
+ :return: true is it exists, false otherwise
+ :rtype: bool
+ """
+
+ return_value = False
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+
+ query_details.add_new_child('path', self.parameters['destination_path'])
+ query_details.add_new_child('vserver', self.parameters['destination_vserver'])
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_info.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(lun_info, True)
+
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error getting lun info %s for vserver %s: %s" %
+ (self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return_value = True
+ return return_value
+
+ def copy_lun(self):
+ """
+ Copy LUN with requested path and vserver
+ """
+ lun_copy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-copy-start', **{'source-vserver': self.parameters['source_vserver']})
+
+ path_obj = netapp_utils.zapi.NaElement('paths')
+ pair = netapp_utils.zapi.NaElement('lun-path-pair')
+ pair.add_new_child('destination-path', self.parameters['destination_path'])
+ pair.add_new_child('source-path', self.parameters['source_path'])
+ path_obj.add_child_elem(pair)
+ lun_copy.add_child_elem(path_obj)
+
+ try:
+ self.server.invoke_successfully(lun_copy, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
+ (self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+
+ netapp_utils.ems_log_event("na_ontap_lun_copy", self.server)
+ if self.get_lun(): # lun already exists at destination
+ changed = False
+ else:
+ changed = True
+ if self.module.check_mode:
+ pass
+ else:
+ # need to copy lun
+ if self.parameters['state'] == 'present':
+ self.copy_lun()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppOntapLUNCopy()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py
new file mode 100644
index 00000000..b1ee175f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+
+""" this is lun mapping module
+
+ (c) 2018-2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+
+module: na_ontap_lun_map
+
+short_description: NetApp ONTAP LUN maps
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Map and unmap LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ initiator_group_name:
+ description:
+ - Initiator group to map to the given LUN.
+ required: true
+ type: str
+
+ path:
+ description:
+    - Path of the LUN.
+ required: true
+ type: str
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+ type: str
+
+ lun_id:
+ description:
+ - LUN ID assigned for the map.
+ type: str
+
+
+"""
+
+EXAMPLES = """
+- name: Create LUN mapping
+ na_ontap_lun_map:
+ state: present
+ initiator_group_name: ansibleIgroup3234
+ path: /vol/iscsi_path/iscsi_lun
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Unmap LUN
+ na_ontap_lun_map:
+ state: absent
+ initiator_group_name: ansibleIgroup3234
+ path: /vol/iscsi_path/iscsi_lun
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+lun_node:
+ description: NetApp controller that is hosting the LUN.
+ returned: success
+ type: str
+ sample: node01
+lun_ostype:
+ description: Specifies the OS of the host accessing the LUN.
+ returned: success
+ type: str
+ sample: vmware
+lun_serial:
+ description: A unique, 12-byte, ASCII string used to identify the LUN.
+ returned: success
+ type: str
+ sample: 80E7/]LZp1Tt
+lun_naa_id:
+ description: The Network Address Authority (NAA) identifier for the LUN.
+ returned: success
+ type: str
+ sample: 600a0980383045372f5d4c5a70315474
+lun_state:
+ description: Online or offline status of the LUN.
+ returned: success
+ type: str
+ sample: online
+lun_size:
+ description: Size of the LUN in bytes.
+ returned: success
+ type: int
+ sample: 2199023255552
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import codecs
+from ansible.module_utils._text import to_text, to_bytes
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUNMap(object):
+    """Map a LUN to an initiator group (or unmap it) through ZAPI calls."""
+
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            initiator_group_name=dict(required=True, type='str'),
+            path=dict(required=True, type='str'),
+            vserver=dict(required=True, type='str'),
+            lun_id=dict(required=False, type='str', default=None),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_if=[
+                ('state', 'present', ['path'])
+            ],
+            supports_check_mode=True
+        )
+
+        self.result = dict(
+            changed=False,
+        )
+
+        p = self.module.params
+
+        # set up state variables
+        self.state = p['state']
+        self.initiator_group_name = p['initiator_group_name']
+        self.path = p['path']
+        self.vserver = p['vserver']
+        self.lun_id = p['lun_id']
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            # ZAPI connection scoped to the target vserver
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+    def get_lun_map(self):
+        """
+        Return details about the LUN map
+
+        :return: lun_id for our igroup's mapping, or None when not mapped
+        :rtype: dict
+        """
+        lun_info = netapp_utils.zapi.NaElement('lun-map-list-info')
+        lun_info.add_new_child('path', self.path)
+        result = self.server.invoke_successfully(lun_info, True)
+        return_value = None
+        igroups = result.get_child_by_name('initiator-groups')
+        if igroups:
+            # the LUN may be mapped to several igroups; look only for ours
+            for igroup_info in igroups.get_children():
+                initiator_group_name = igroup_info.get_child_content('initiator-group-name')
+                lun_id = igroup_info.get_child_content('lun-id')
+                if initiator_group_name == self.initiator_group_name:
+                    return_value = {
+                        'lun_id': lun_id
+                    }
+                    break
+
+        return return_value
+
+    def get_lun(self):
+        """
+        Return details about the LUN
+
+        :return: Details about the lun, or None when path matches no LUN
+        :rtype: dict
+        """
+        # build the lun query
+        query_details = netapp_utils.zapi.NaElement('lun-info')
+        query_details.add_new_child('path', self.path)
+
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(query_details)
+
+        lun_query = netapp_utils.zapi.NaElement('lun-get-iter')
+        lun_query.add_child_elem(query)
+
+        # find lun using query
+        result = self.server.invoke_successfully(lun_query, True)
+        return_value = None
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            lun = result.get_child_by_name('attributes-list').get_child_by_name('lun-info')
+
+            # extract and assign lun information to return value;
+            # the NAA id is the fixed prefix '600a0980' followed by the
+            # hex-encoded serial number (see RETURN documentation sample)
+            hexlify = codecs.getencoder('hex')
+            naa_hex = to_text(hexlify(to_bytes(lun.get_child_content('serial-number')))[0])
+            return_value = {
+                'lun_node': lun.get_child_content('node'),
+                'lun_ostype': lun.get_child_content('multiprotocol-type'),
+                'lun_serial': lun.get_child_content('serial-number'),
+                'lun_naa_id': '600a0980' + naa_hex,
+                'lun_state': lun.get_child_content('state'),
+                'lun_size': lun.get_child_content('size'),
+            }
+
+        return return_value
+
+    def create_lun_map(self):
+        """
+        Create LUN map
+        """
+        options = {'path': self.path, 'initiator-group': self.initiator_group_name}
+        if self.lun_id is not None:
+            # let ONTAP pick the LUN id unless the user requested a specific one
+            options['lun-id'] = self.lun_id
+        lun_map_create = netapp_utils.zapi.NaElement.create_node_with_children('lun-map', **options)
+
+        try:
+            self.server.invoke_successfully(lun_map_create, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as e:
+            self.module.fail_json(msg="Error mapping lun %s of initiator_group_name %s: %s" %
+                                      (self.path, self.initiator_group_name, to_native(e)),
+                                  exception=traceback.format_exc())
+
+    def delete_lun_map(self):
+        """
+        Unmap LUN map
+        """
+        lun_map_delete = netapp_utils.zapi.NaElement.create_node_with_children('lun-unmap', **{'path': self.path, 'initiator-group': self.initiator_group_name})
+
+        try:
+            self.server.invoke_successfully(lun_map_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as e:
+            self.module.fail_json(msg="Error unmapping lun %s of initiator_group_name %s: %s" %
+                                      (self.path, self.initiator_group_name, to_native(e)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        """Map/unmap as required and exit with LUN facts in the result."""
+        netapp_utils.ems_log_event("na_ontap_lun_map", self.server)
+        lun_details = self.get_lun()
+        lun_map_details = self.get_lun_map()
+
+        # always report LUN facts when the LUN exists, even with no change
+        if self.state == 'present' and lun_details:
+            self.result.update(lun_details)
+
+        if self.state == 'present' and not lun_map_details:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                self.create_lun_map()
+        elif self.state == 'absent' and lun_map_details:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                self.delete_lun_map()
+
+        self.module.exit_json(**self.result)
+
+
+def main():
+ v = NetAppOntapLUNMap()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py
new file mode 100644
index 00000000..415d6633
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module implements the operations for ONTAP MCC Mediator.
+# The Mediator is supported for MCC IP configs from ONTAP 9.7 or later.
+# This module requires REST APIs for Mediator which is supported from
+# ONTAP 9.8 (DW) or later
+
+'''
+na_ontap_mcc_mediator
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_mcc_mediator
+short_description: NetApp ONTAP Add and Remove MetroCluster Mediator
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add and remove ONTAP MCC Mediator
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether MCCIP Mediator is present or not."
+ default: present
+ type: str
+
+ mediator_address:
+ description:
+ - ip address of the mediator
+ type: str
+ required: true
+
+ mediator_user:
+ description:
+ - username of the mediator
+ type: str
+ required: true
+
+ mediator_password:
+ description:
+ - password of the mediator
+ type: str
+ required: true
+
+'''
+
+EXAMPLES = """
+ - name: Add ONTAP MCCIP Mediator
+ na_ontap_mcc_mediator:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ mediator_address: mediator_ip
+ mediator_user: metrocluster_admin
+ mediator_password: netapp1!
+
+ - name: Delete ONTAP MCCIP Mediator
+ na_ontap_mcc_mediator:
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ mediator_user: metrocluster_admin
+ mediator_password: netapp1!
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppOntapMccipMediator(object):
+ """
+ Mediator object for Add/Remove/Display
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ mediator_address=dict(required=True, type='str'),
+ mediator_user=dict(required=True, type='str'),
+ mediator_password=dict(required=True, type='str', no_log=True),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_mcc_mediator'))
+
+ def add_mediator(self):
+ """
+ Adds an ONTAP Mediator to MCC configuration
+ """
+ api = 'cluster/mediators'
+ params = {
+ 'ip_address': self.parameters['mediator_address'],
+ 'password': self.parameters['mediator_password'],
+ 'user': self.parameters['mediator_user']
+ }
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def remove_mediator(self, current_uuid):
+ """
+ Removes the ONTAP Mediator from MCC configuration
+ """
+ api = 'cluster/mediators/%s' % current_uuid
+ params = {
+ 'ip_address': self.parameters['mediator_address'],
+ 'password': self.parameters['mediator_password'],
+ 'user': self.parameters['mediator_user'],
+ 'uuid': current_uuid
+ }
+ dummy, error = self.rest_api.delete(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def get_mediator(self):
+ """
+ Determine if the MCC configuration has added an ONTAP Mediator
+ """
+ api = "cluster/mediators"
+ message, error = self.rest_api.get(api, None)
+ if error:
+ self.module.fail_json(msg=error)
+ if message['num_records'] > 0:
+ return message['records'][0]['uuid']
+ return None
+
+ def apply(self):
+ """
+ Apply action to MCC Mediator
+ """
+ current = self.get_mediator()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.add_mediator()
+ elif cd_action == 'delete':
+ self.remove_mediator(current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Add, Remove and display ONTAP MCC Mediator
+ """
+ mediator_obj = NetAppOntapMccipMediator()
+ mediator_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py
new file mode 100644
index 00000000..16344452
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+"""
+(c) 2020, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: na_ontap_metrocluster
+short_description: NetApp ONTAP set up a MetroCluster
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.9.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+requirements:
+ - ONTAP >= 9.8
+
+description:
+ - Configure MetroCluster.
+options:
+ state:
+ choices: ['present']
+ description:
+ - Present to set up a MetroCluster
+ default: present
+ type: str
+ dr_pairs:
+ description: disaster recovery pair
+ type: list
+ required: true
+ elements: dict
+ suboptions:
+ node_name:
+ description:
+ - the name of the main node
+ required: true
+ type: str
+ partner_node_name:
+ description:
+ - the name of the main partner node
+ required: true
+ type: str
+    partner_cluster_name:
+        description:
+        - The name of the partner Cluster
+        required: true
+        type: str
+'''
+
+EXAMPLES = '''
+-
+ name: Manage MetroCluster
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: True
+ validate_certs: False
+ tasks:
+ - name: Create MetroCluster
+ na_ontap_metrocluster:
+ <<: *login
+ dr_pairs:
+ - partner_node_name: rha17-a2
+ node_name: rha17-b2
+ partner_cluster_name: rha2-b2b1_siteB
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPMetroCluster(object):
+ ''' ONTAP metrocluster operations '''
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(choices=['present'], default='present'),
+ dr_pairs=dict(required=True, type='list', elements='dict', options=dict(
+ node_name=dict(required=True, type='str'),
+ partner_node_name=dict(required=True, type='str')
+ )),
+ partner_cluster_name=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_metrocluster'))
+
+ def get_metrocluster(self):
+ attrs = None
+ api = 'cluster/metrocluster'
+ options = {'fields': '*'}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if message is not None:
+ local = message['local']
+ if local['configuration_state'] != "not_configured":
+ attrs = {
+ 'configuration_state': local['configuration_state'],
+ 'partner_cluster_reachable': local['partner_cluster_reachable'],
+ 'partner_cluster_name': local['cluster']['name']
+ }
+ return attrs
+
+ def create_metrocluster(self):
+ api = 'cluster/metrocluster'
+ options = {}
+ dr_pairs = []
+ for pair in self.parameters['dr_pairs']:
+ dr_pairs.append({'node': {'name': pair['node_name']},
+ 'partner': {'name': pair['partner_node_name']}})
+ partner_cluster = {'name': self.parameters['partner_cluster_name']}
+ data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster}
+ message, error = self.rest_api.post(api, data, options)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def apply(self):
+ current = self.get_metrocluster()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_metrocluster()
+ # Since there is no modify or delete, we will return no change
+ else:
+ self.module.fail_json(msg="Modify and Delete currently not support in API")
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppONTAPMetroCluster()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py
new file mode 100644
index 00000000..d8345c3d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+"""
+(c) 2020, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: na_ontap_metrocluster_dr_group
+short_description: NetApp ONTAP manage MetroCluster DR Group
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.11.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+requirements:
+ - ONTAP >= 9.8
+description:
+ - Create/Delete MetroCluster DR Group
+ - Create only supports MCC IP
+ - Delete supports both MCC IP and MCC FC
+options:
+ state:
+ choices: ['present', 'absent']
+        description:
+        - Add or remove DR groups.
+ default: present
+ type: str
+ dr_pairs:
+ description: disaster recovery pairs
+ type: list
+ required: true
+ elements: dict
+ suboptions:
+ node_name:
+ description:
+ - the name of the main node
+ required: true
+ type: str
+ partner_node_name:
+ description:
+ - the name of the main partner node
+ required: true
+ type: str
+ partner_cluster_name:
+ description:
+ - The name of the partner cluster
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+-
+ name: Manage MetroCluster DR group
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: True
+ validate_certs: False
+ tasks:
+ - name: Create MetroCluster DR group
+ na_ontap_metrocluster_dr_group:
+ <<: *login
+ dr_pairs:
+ - partner_name: carchi_cluster3_01
+ node_name: carchi_cluster1_01
+ partner_cluster_name: carchi_cluster3
+ - name: Delete MetroCluster DR group
+ na_ontap_metrocluster_dr_group:
+ <<: *login
+ dr_pairs:
+ - partner_name: carchi_cluster3_01
+ node_name: carchi_cluster1_01
+ state: absent
+ partner_cluster_name: carchi_cluster3
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPMetroClusterDRGroup(object):
+    """Create or delete a MetroCluster DR group through the REST API."""
+
+    def __init__(self):
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            dr_pairs=dict(required=True, type='list', elements='dict', options=dict(
+                node_name=dict(required=True, type='str'),
+                partner_node_name=dict(required=True, type='str')
+            )),
+            partner_cluster_name=dict(required=True, type='str')
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        # DR group REST endpoints need ONTAP 9.8 or later
+        if not self.use_rest:
+            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_metrocluster_dr_group',
+                                                                           version='9.8'))
+
+    def get_dr_group(self):
+        """Return the matching DR group attributes (MCC IP), or None if absent."""
+        return_attrs = None
+        for pair in self.parameters['dr_pairs']:
+            api = 'cluster/metrocluster/dr-groups'
+            options = {'fields': '*',
+                       'dr_pairs.node.name': pair['node_name'],
+                       'dr_pairs.partner.name': pair['partner_node_name'],
+                       'partner_cluster.name': self.parameters['partner_cluster_name']}
+            message, error = self.rest_api.get(api, options)
+            if error:
+                self.module.fail_json(msg=error)
+            if 'records' in message and message['num_records'] == 0:
+                # this pair is not part of any DR group; try the next one
+                continue
+            elif 'records' not in message or message['num_records'] != 1:
+                error = "Unexpected response from %s: %s" % (api, repr(message))
+                self.module.fail_json(msg=error)
+            record = message['records'][0]
+            return_attrs = {
+                'partner_cluster_name': record['partner_cluster']['name'],
+                'dr_pairs': [],
+                'id': record['id']
+            }
+            for dr_pair in record['dr_pairs']:
+                return_attrs['dr_pairs'].append({'node_name': dr_pair['node']['name'], 'partner_node_name': dr_pair['partner']['name']})
+            # once a DR group id has been found we don't need to loop anymore
+            break
+        return return_attrs
+
+    def get_dr_group_ids_from_nodes(self):
+        """Collect distinct DR group ids for the given nodes (covers MCC FC too)."""
+        delete_ids = []
+        for pair in self.parameters['dr_pairs']:
+            api = 'cluster/metrocluster/nodes'
+            options = {'fields': '*',
+                       'node.name': pair['node_name']}
+            message, error = self.rest_api.get(api, options)
+            if error:
+                self.module.fail_json(msg=error)
+            if 'records' in message and message['num_records'] == 0:
+                continue
+            elif 'records' not in message or message['num_records'] != 1:
+                error = "Unexpected response from %s: %s" % (api, repr(message))
+                self.module.fail_json(msg=error)
+            record = message['records'][0]
+            # de-duplicate: several nodes may belong to the same DR group
+            if int(record['dr_group_id']) not in delete_ids:
+                delete_ids.append(int(record['dr_group_id']))
+        return delete_ids
+
+    def create_dr_group(self):
+        """POST a new DR group and wait for the async job to finish."""
+        api = 'cluster/metrocluster/dr-groups'
+        dr_pairs = []
+        for pair in self.parameters['dr_pairs']:
+            dr_pairs.append({'node': {'name': pair['node_name']},
+                             'partner': {'name': pair['partner_node_name']}})
+        partner_cluster = {'name': self.parameters['partner_cluster_name']}
+        data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster}
+        message, error = self.rest_api.post(api, data)
+        if error is not None:
+            self.module.fail_json(msg="%s" % error)
+        # NOTE(review): assumes a successful POST always returns a 'job' entry
+        message, error = self.rest_api.wait_on_job(message['job'])
+        if error:
+            self.module.fail_json(msg="%s" % error)
+
+    def delete_dr_groups(self, dr_ids):
+        """DELETE each DR group id, waiting for the async job each time."""
+        for dr_id in dr_ids:
+            api = 'cluster/metrocluster/dr-groups/' + str(dr_id)
+            message, error = self.rest_api.delete(api)
+            if error:
+                self.module.fail_json(msg=error)
+            message, error = self.rest_api.wait_on_job(message['job'])
+            if error:
+                self.module.fail_json(msg="%s" % error)
+
+    def apply(self):
+        """Determine the required action (create/delete) and perform it."""
+        current = self.get_dr_group()
+        delete_ids = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and current is None and self.parameters['state'] == 'absent':
+            # check if there is some FC group to delete
+            delete_ids = self.get_dr_group_ids_from_nodes()
+            if delete_ids:
+                cd_action = 'delete'
+                self.na_helper.changed = True
+        elif cd_action == 'delete':
+            delete_ids = [current['id']]
+        if cd_action and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_dr_group()
+            if cd_action == 'delete':
+                self.delete_dr_groups(delete_ids)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppONTAPMetroClusterDRGroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py
new file mode 100644
index 00000000..617cf741
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_motd
+author:
+ - Piotr Olczak (@dprts) <polczak@redhat.com>
+ - NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: Setup motd
+description:
+ - This module allows you to manipulate motd for a vserver
+ - It also allows to manipulate motd at the cluster level by using the cluster vserver (cserver)
+version_added: 2.7.0
+requirements:
+ - netapp_lib
+options:
+ state:
+ description:
+ - If C(state=present) sets MOTD given in I(message) C(state=absent) removes it.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ motd_message:
+ description:
+ - MOTD Text message.
+ type: str
+ aliases:
+ - message
+ vserver:
+ description:
+ - The name of the SVM motd should be set for.
+ required: true
+ type: str
+ show_cluster_motd:
+ description:
+ - Set to I(false) if Cluster-level Message of the Day should not be shown
+ type: bool
+ default: True
+
+'''
+
+EXAMPLES = '''
+
+- name: Set Cluster-Level MOTD
+ na_ontap_motd:
+ vserver: my_ontap_cluster
+ motd_message: "Cluster wide MOTD"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: present
+ https: true
+
+- name: Set MOTD for I(rhev_nfs_krb) SVM, do not show Cluster-Level MOTD
+ na_ontap_motd:
+ vserver: rhev_nfs_krb
+ motd_message: "Access to rhev_nfs_krb is also restricted"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: present
+ show_cluster_motd: False
+ https: true
+
+- name: Remove Cluster-Level MOTD
+ na_ontap_motd:
+ vserver: my_ontap_cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: absent
+ https: true
+
+'''
+
+RETURN = '''
+
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPMotd(object):
+
+ def __init__(self):
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ vserver=dict(required=True, type='str'),
+ motd_message=dict(default='', type='str', aliases=['message']),
+ show_cluster_motd=dict(default=True, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def motd_get_iter(self):
+ """
+ Compose NaElement object to query current motd
+ :return: NaElement object for vserver-motd-get-iter
+ """
+ motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('is-cluster-message-enabled', str(self.parameters['show_cluster_motd']))
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_get_iter.add_child_elem(query)
+ return motd_get_iter
+
+ def motd_get(self):
+ """
+ Get current motd
+ :return: Dictionary of current motd details if query successful, else None
+ """
+ motd_get_iter = self.motd_get_iter()
+ motd_result = dict()
+ try:
+ result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ motd_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'vserver-motd-info')
+ motd_result['motd_message'] = motd_info.get_child_content('message')
+ motd_result['motd_message'] = str(motd_result['motd_message']).rstrip()
+ motd_result['show_cluster_motd'] = True if motd_info.get_child_content(
+ 'is-cluster-message-enabled') == 'true' else False
+ motd_result['vserver'] = motd_info.get_child_content('vserver')
+ return motd_result
+ return None
+
+ def modify_motd(self):
+ motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter')
+ motd_create.add_new_child('message', self.parameters['motd_message'])
+ motd_create.add_new_child(
+ 'is-cluster-message-enabled', 'true' if self.parameters['show_cluster_motd'] is True else 'false')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_create.add_child_elem(query)
+ try:
+ self.server.invoke_successfully(motd_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as err:
+ self.module.fail_json(msg="Error creating motd: %s" % (to_native(err)), exception=traceback.format_exc())
+ return motd_create
+
+ def apply(self):
+ """
+ Applies action from playbook
+ """
+ netapp_utils.ems_log_event("na_ontap_motd", self.server)
+ current = self.motd_get()
+ if self.parameters['state'] == 'absent':
+ # Just make sure it is empty
+ self.parameters['motd_message'] = ''
+ if current and current['motd_message'] == 'None':
+ current = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.modify_motd()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ motd_obj = NetAppONTAPMotd()
+ motd_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py
new file mode 100644
index 00000000..3e56f636
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify Name Service Switch
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_name_service_switch
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified ns-switch should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ database_type:
+ description:
+ - Name services switch database.
+ choices: ['hosts','group', 'passwd', 'netgroup', 'namemap']
+ required: true
+ type: str
+ sources:
+ description:
+ - Type of sources.
+ - Possible values include files,dns,ldap,nis.
+ type: list
+ elements: str
+
+short_description: "NetApp ONTAP Manage name service switch"
+'''
+
+EXAMPLES = """
+ - name: create name service database
+ na_ontap_name_service_switch:
+ state: present
+ database_type: namemap
+ sources: files,ldap
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+
+ - name: modify name service database sources
+ na_ontap_name_service_switch:
+ state: present
+ database_type: namemap
+ sources: files
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPNsswitch(object):
    """
    Create, delete or modify a vserver name service switch (ns-switch) entry.
    """

    def __init__(self):

        # Standard ONTAP connection options plus this module's own parameters.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            database_type=dict(required=True, type='str', choices=['hosts', 'group', 'passwd', 'netgroup', 'namemap']),
            sources=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # ZAPI connection scoped to the target vserver.
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_name_service_switch(self):
        """
        get current name service switch config
        :return: dict with a 'sources' list if exactly one entry matches, else None
        """
        nss_iter = netapp_utils.zapi.NaElement('nameservice-nsswitch-get-iter')
        # 'namservice-nsswitch-config-info' (missing 'e') is the element name
        # defined by the ZAPI itself, not a typo in this module.
        nss_info = netapp_utils.zapi.NaElement('namservice-nsswitch-config-info')
        db_type = netapp_utils.zapi.NaElement('nameservice-database')
        db_type.set_content(self.parameters['database_type'])
        query = netapp_utils.zapi.NaElement('query')
        nss_info.add_child_elem(db_type)
        query.add_child_elem(nss_info)
        nss_iter.add_child_elem(query)
        # Fail via fail_json on API errors, consistent with the create/delete/
        # modify methods, instead of surfacing a raw NaApiError traceback.
        try:
            result = self.server.invoke_successfully(nss_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching name service switch info on vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())
        return_value = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
            nss_sources = result.get_child_by_name('attributes-list').get_child_by_name(
                'namservice-nsswitch-config-info').get_child_by_name('nameservice-sources')
            sources = [sources.get_content() for sources in nss_sources.get_children()]
            return_value = {
                'sources': sources
            }
        return return_value

    def create_name_service_switch(self):
        """
        create name service switch config
        :return: None
        """
        # 'sources' is optional in the argument spec but mandatory for a
        # create; without this guard a missing value raised a raw KeyError.
        if self.parameters.get('sources') is None:
            self.module.fail_json(msg='Error: sources is a required parameter for creating name service switch')
        nss_create = netapp_utils.zapi.NaElement('nameservice-nsswitch-create')
        nss_create.add_new_child('nameservice-database', self.parameters['database_type'])
        nss_sources = netapp_utils.zapi.NaElement('nameservice-sources')
        nss_create.add_child_elem(nss_sources)
        for source in self.parameters['sources']:
            nss_sources.add_new_child('nss-source-type', source.strip())
        try:
            self.server.invoke_successfully(nss_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error on creating name service switch config on vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_name_service_switch(self):
        """
        delete name service switch
        :return: None
        """
        nss_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'nameservice-nsswitch-destroy', **{'nameservice-database': self.parameters['database_type']})
        try:
            self.server.invoke_successfully(nss_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error on deleting name service switch config on vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_name_service_switch(self, modify):
        """
        modify name service switch
        :param modify: dict of changed attributes (only 'sources' is modifiable)
        :return: None
        """
        nss_modify = netapp_utils.zapi.NaElement('nameservice-nsswitch-modify')
        nss_modify.add_new_child('nameservice-database', self.parameters['database_type'])
        nss_sources = netapp_utils.zapi.NaElement('nameservice-sources')
        nss_modify.add_child_elem(nss_sources)
        if 'sources' in modify:
            for source in self.parameters['sources']:
                nss_sources.add_new_child('nss-source-type', source.strip())
        try:
            self.server.invoke_successfully(nss_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error on modifying name service switch config on vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Decide create/delete/modify from current vs desired state and execute it."""
        netapp_utils.ems_log_event("na_ontap_name_service_switch", self.server)
        current = self.get_name_service_switch()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_name_service_switch()
            elif cd_action == 'delete':
                self.delete_name_service_switch()
            elif modify:
                self.modify_name_service_switch(modify)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Apply operations from playbook."""
    switch_module = NetAppONTAPNsswitch()
    switch_module.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py
new file mode 100644
index 00000000..528b168e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+""" this is ndmp module
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: na_ontap_ndmp
+short_description: NetApp ONTAP NDMP services configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Modify NDMP Services.
+
+options:
+
+ vserver:
+ description:
+ - Name of the vserver.
+ required: true
+ type: str
+
+ abort_on_disk_error:
+ description:
+ - Enable abort on disk error.
+ type: bool
+
+ authtype:
+ description:
+ - Authentication type.
+ type: list
+ elements: str
+
+ backup_log_enable:
+ description:
+ - Enable backup log.
+ type: bool
+
+ data_port_range:
+ description:
+ - Data port range. Modification not supported for data Vservers.
+ type: str
+
+ debug_enable:
+ description:
+ - Enable debug.
+ type: bool
+
+ debug_filter:
+ description:
+ - Debug filter.
+ type: str
+
+ dump_detailed_stats:
+ description:
+ - Enable logging of VM stats for dump.
+ type: bool
+
+ dump_logical_find:
+ description:
+ - Enable logical find for dump.
+ type: str
+
+ enable:
+ description:
+ - Enable NDMP on vserver.
+ type: bool
+
+ fh_dir_retry_interval:
+ description:
+ - FH throttle value for dir.
+ type: int
+
+ fh_node_retry_interval:
+ description:
+ - FH throttle value for node.
+ type: int
+
+ ignore_ctime_enabled:
+ description:
+ - Ignore ctime.
+ type: bool
+
+ is_secure_control_connection_enabled:
+ description:
+ - Is secure control connection enabled.
+ type: bool
+
+ offset_map_enable:
+ description:
+ - Enable offset map.
+ type: bool
+
+ per_qtree_exclude_enable:
+ description:
+ - Enable per qtree exclusion.
+ type: bool
+
+ preferred_interface_role:
+ description:
+ - Preferred interface role.
+ type: list
+ elements: str
+
+ restore_vm_cache_size:
+ description:
+ - Restore VM file cache size. Value range [4-1024]
+ type: int
+
+ secondary_debug_filter:
+ description:
+ - Secondary debug filter.
+ type: str
+
+ tcpnodelay:
+ description:
+ - Enable TCP nodelay.
+ type: bool
+
+ tcpwinsize:
+ description:
+ - TCP window size.
+ type: int
+'''
+
+EXAMPLES = '''
+ - name: modify ndmp
+ na_ontap_ndmp:
+ vserver: ansible
+ hostname: "{{ hostname }}"
+ abort_on_disk_error: true
+ authtype: plaintext,challenge
+ backup_log_enable: true
+ data_port_range: 8000-9000
+ debug_enable: true
+ debug_filter: filter
+ dump_detailed_stats: true
+ dump_logical_find: default
+ enable: true
+ fh_dir_retry_interval: 100
+ fh_node_retry_interval: 100
+ ignore_ctime_enabled: true
+ is_secure_control_connection_enabled: true
+ offset_map_enable: true
+ per_qtree_exclude_enable: true
+ preferred_interface_role: node_mgmt,intercluster
+ restore_vm_cache_size: 1000
+ secondary_debug_filter: filter
+ tcpnodelay: true
+ tcpwinsize: 10000
+ username: user
+ password: pass
+ https: False
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPNdmp(object):
    '''
    Modify vserver NDMP attributes (REST for ONTAP 9.6+, ZAPI otherwise).
    '''
    def __init__(self):
        self.use_rest = False

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        # Every option below is a candidate for modification; this dict also
        # drives get_ndmp_details() when reading back current ZAPI values.
        self.modifiable_options = dict(
            abort_on_disk_error=dict(required=False, type='bool'),
            authtype=dict(required=False, type='list', elements='str'),
            backup_log_enable=dict(required=False, type='bool'),
            data_port_range=dict(required=False, type='str'),
            debug_enable=dict(required=False, type='bool'),
            debug_filter=dict(required=False, type='str'),
            dump_detailed_stats=dict(required=False, type='bool'),
            dump_logical_find=dict(required=False, type='str'),
            enable=dict(required=False, type='bool'),
            fh_dir_retry_interval=dict(required=False, type='int'),
            fh_node_retry_interval=dict(required=False, type='int'),
            ignore_ctime_enabled=dict(required=False, type='bool'),
            is_secure_control_connection_enabled=dict(required=False, type='bool'),
            offset_map_enable=dict(required=False, type='bool'),
            per_qtree_exclude_enable=dict(required=False, type='bool'),
            preferred_interface_role=dict(required=False, type='list', elements='str'),
            restore_vm_cache_size=dict(required=False, type='int'),
            secondary_debug_filter=dict(required=False, type='str'),
            tcpnodelay=dict(required=False, type='bool'),
            tcpwinsize=dict(required=False, type='int')
        )
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str')
        ))

        self.argument_spec.update(self.modifiable_options)

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # API should be used for ONTAP 9.6 or higher, ZAPI for lower version
        self.rest_api = OntapRestAPI(self.module)
        # Options the REST endpoint cannot handle; if any of them is requested
        # we must fall back to ZAPI.
        unsupported_rest_properties = ['abort_on_disk_error', 'backup_log_enable', 'data_port_range',
                                       'debug_enable', 'debug_filter', 'dump_detailed_stats',
                                       'dump_logical_find', 'fh_dir_retry_interval', 'fh_node_retry_interval',
                                       'ignore_ctime_enabled', 'is_secure_control_connection_enabled',
                                       'offset_map_enable', 'per_qtree_exclude_enable', 'preferred_interface_role',
                                       'restore_vm_cache_size', 'secondary_debug_filter', 'tcpnodelay', 'tcpwinsize']
        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
        self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
        if error is not None:
            self.module.fail_json(msg=error)
        if not self.use_rest:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_ndmp_svm_uuid(self):

        """
        Get a svm's UUID
        :return: uuid of the svm
        """
        params = {'svm.name': self.parameters['vserver']}
        api = "protocols/ndmp/svms"
        message, error = self.rest_api.get(api, params)
        if error is not None:
            self.module.fail_json(msg=error)
        if 'records' in message and len(message['records']) == 0:
            self.module.fail_json(msg='Error fetching uuid for vserver %s: ' % (self.parameters['vserver']))
        if len(message.keys()) == 0:
            error = "No information collected from %s: %s" % (api, repr(message))
            self.module.fail_json(msg=error)
        elif 'records' not in message:
            error = "Unexpected response from %s: %s" % (api, repr(message))
            self.module.fail_json(msg=error)
        return message['records'][0]['svm']['uuid']

    def ndmp_get_iter(self, uuid=None):
        """
        get current vserver ndmp attributes.
        :param uuid: svm uuid, required (and used) only for the REST path.
        :return: a dict of ndmp attributes.
        """
        if self.use_rest:
            data = dict()
            params = {'fields': 'authentication_types,enabled'}
            # No leading slash: consistent with the other rest_api calls in
            # this class (get_ndmp_svm_uuid, modify_ndmp).
            api = 'protocols/ndmp/svms/' + uuid
            message, error = self.rest_api.get(api, params)
            # Check for errors BEFORE touching the payload: on failure the
            # response may not contain 'enabled'/'authentication_types', and
            # the previous ordering raised KeyError before fail_json could run.
            if error:
                self.module.fail_json(msg=error)
            data['enable'] = message['enabled']
            data['authtype'] = message['authentication_types']
            return data
        else:
            ndmp_get = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-get-iter')
            query = netapp_utils.zapi.NaElement('query')
            ndmp_info = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-info')
            ndmp_info.add_new_child('vserver', self.parameters['vserver'])
            query.add_child_elem(ndmp_info)
            ndmp_get.add_child_elem(query)
            ndmp_details = dict()
            try:
                result = self.server.invoke_successfully(ndmp_get, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching ndmp from %s: %s'
                                          % (self.parameters['vserver'], to_native(error)),
                                      exception=traceback.format_exc())

            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
                ndmp_attributes = result.get_child_by_name('attributes-list').get_child_by_name('ndmp-vserver-attributes-info')
                self.get_ndmp_details(ndmp_details, ndmp_attributes)
            return ndmp_details

    def get_ndmp_details(self, ndmp_details, ndmp_attributes):
        """
        :param ndmp_details: a dict of current ndmp, filled in place.
        :param ndmp_attributes: ndmp returned from api call in xml format.
        :return: None
        """
        # NOTE(review): assumes every modifiable option is present in the ZAPI
        # reply; a missing int/list child would raise here -- TODO confirm.
        for option in self.modifiable_options:
            option_type = self.modifiable_options[option]['type']
            if option_type == 'bool':
                ndmp_details[option] = self.str_to_bool(ndmp_attributes.get_child_content(self.attribute_to_name(option)))
            elif option_type == 'int':
                ndmp_details[option] = int(ndmp_attributes.get_child_content(self.attribute_to_name(option)))
            elif option_type == 'list':
                child_list = ndmp_attributes.get_child_by_name(self.attribute_to_name(option))
                values = [child.get_content() for child in child_list.get_children()]
                ndmp_details[option] = values
            else:
                ndmp_details[option] = ndmp_attributes.get_child_content(self.attribute_to_name(option))

    def modify_ndmp(self, modify):
        """
        :param modify: A list of attributes to modify
        :return: None
        """
        if self.use_rest:
            # REST supports only 'enabled' and 'authentication_types'.
            ndmp = dict()
            uuid = self.get_ndmp_svm_uuid()
            if self.parameters.get('enable'):
                ndmp['enabled'] = self.parameters['enable']
            if self.parameters.get('authtype'):
                ndmp['authentication_types'] = self.parameters['authtype']
            api = "protocols/ndmp/svms/" + uuid
            dummy, error = self.rest_api.patch(api, ndmp)
            if error:
                self.module.fail_json(msg=error)
        else:

            ndmp_modify = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-modify')
            for attribute in modify:
                # list-valued attributes need one child element per entry.
                if attribute == 'authtype':
                    authtypes = netapp_utils.zapi.NaElement('authtype')
                    types = self.parameters['authtype']
                    for authtype in types:
                        authtypes.add_new_child('ndmpd-authtypes', authtype)
                    ndmp_modify.add_child_elem(authtypes)
                elif attribute == 'preferred_interface_role':
                    preferred_interface_roles = netapp_utils.zapi.NaElement('preferred-interface-role')
                    roles = self.parameters['preferred_interface_role']
                    for role in roles:
                        preferred_interface_roles.add_new_child('netport-role', role)
                    ndmp_modify.add_child_elem(preferred_interface_roles)
                else:
                    ndmp_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
            try:
                self.server.invoke_successfully(ndmp_modify, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as exc:
                self.module.fail_json(msg='Error modifying ndmp on %s: %s'
                                          % (self.parameters['vserver'], to_native(exc)),
                                      exception=traceback.format_exc())

    @staticmethod
    def attribute_to_name(attribute):
        """Convert a snake_case option name to its hyphenated ZAPI element name."""
        return str.replace(attribute, '_', '-')

    @staticmethod
    def str_to_bool(value):
        """Map the ZAPI string 'true' to True; anything else to False."""
        return value == 'true'

    def apply(self):
        """Call modify operations."""
        uuid = None
        if not self.use_rest:
            self.asup_log_for_cserver("na_ontap_ndmp")
        if self.use_rest:
            # we only have the svm name, we need to look up the uuid for the svm
            uuid = self.get_ndmp_svm_uuid()
        current = self.ndmp_get_iter(uuid=uuid)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if modify:
                self.modify_ndmp(modify)
        self.module.exit_json(changed=self.na_helper.changed)

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)
+
+
def main():
    """Entry point: build the NDMP module object and apply the requested changes."""
    NetAppONTAPNdmp().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py
new file mode 100644
index 00000000..675fa44e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_net_ifgrp
+short_description: NetApp Ontap modify network interface group
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, modify ports, destroy the network interface group
+options:
+ state:
+ description:
+ - Whether the specified network interface group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ distribution_function:
+ description:
+ - Specifies the traffic distribution function for the ifgrp.
+ choices: ['mac', 'ip', 'sequential', 'port']
+ type: str
+
+ name:
+ description:
+ - Specifies the interface group name.
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Specifies the link policy for the ifgrp.
+ type: str
+
+ node:
+ description:
+ - Specifies the name of node.
+ required: true
+ type: str
+
+ ports:
+ aliases:
+ - port
+ description:
+ - List of expected ports to be present in the interface group.
+ - If a port is present in this list, but not on the target, it will be added.
+ - If a port is not in the list, but present on the target, it will be removed.
+ - Make sure the list contains all ports you want to see on the target.
+ version_added: 2.8.0
+ type: list
+ elements: str
+"""
+
+EXAMPLES = """
+ - name: create ifgrp
+ na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ name: a0c
+ ports: [e0a]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: modify ports in an ifgrp
+ na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ name: a0c
+ port: [e0a, e0c]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: delete ifgrp
+ na_ontap_net_ifgrp:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: a0c
+ node: "{{ Vsim node name }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapIfGrp(object):
    """
    Create, modify ports of, and destroy a network interface group (ifgrp).
    """
    def __init__(self):
        """
        Initialize the Ontap IfGrp class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            distribution_function=dict(required=False, type='str', choices=['mac', 'ip', 'sequential', 'port']),
            name=dict(required=True, type='str'),
            mode=dict(required=False, type='str'),
            node=dict(required=True, type='str'),
            ports=dict(required=False, type='list', elements='str', aliases=["port"]),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['distribution_function', 'mode'])
            ],
            supports_check_mode=True
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_if_grp(self):
        """
        Return details about the if_group
        :param:
            name : Name of the if_group

        :return: Details about the if_group. None if not found.
        :rtype: dict
        """
        if_group_iter = netapp_utils.zapi.NaElement('net-port-get-iter')
        # Query on port name, node and port-type 'if_group' so plain ports
        # with the same name are not matched.
        if_group_info = netapp_utils.zapi.NaElement('net-port-info')
        if_group_info.add_new_child('port', self.parameters['name'])
        if_group_info.add_new_child('port-type', 'if_group')
        if_group_info.add_new_child('node', self.parameters['node'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(if_group_info)
        if_group_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(if_group_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        return_value = None

        if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
            if_group_attributes = result['attributes-list']['net-port-info']
            return_value = {
                'name': if_group_attributes['port'],
                'distribution_function': if_group_attributes['ifgrp-distribution-function'],
                'mode': if_group_attributes['ifgrp-mode'],
                'node': if_group_attributes['node'],
            }

        return return_value

    def get_if_grp_ports(self):
        """
        Return ports of the if_group
        :param:
            name : Name of the if_group
        :return: dict with a 'ports' list (empty if the ifgrp has no ports).
        :rtype: dict
        """
        if_group_iter = netapp_utils.zapi.NaElement('net-port-ifgrp-get')
        if_group_iter.add_new_child('ifgrp-name', self.parameters['name'])
        if_group_iter.add_new_child('node', self.parameters['node'])
        try:
            result = self.server.invoke_successfully(if_group_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting if_group ports %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        port_list = []
        if result.get_child_by_name('attributes'):
            if_group_attributes = result['attributes']['net-ifgrp-info']
            if if_group_attributes.get_child_by_name('ports'):
                ports = if_group_attributes.get_child_by_name('ports').get_children()
                for each in ports:
                    port_list.append(each.get_content())
        return {'ports': port_list}

    def create_if_grp(self):
        """
        Creates a new ifgrp, then adds any requested ports to it.
        """
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-create")
        route_obj.add_new_child("distribution-function", self.parameters['distribution_function'])
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("mode", self.parameters['mode'])
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if self.parameters.get('ports') is not None:
            for port in self.parameters.get('ports'):
                self.add_port_to_if_grp(port)

    def delete_if_grp(self):
        """
        Deletes a ifgrp
        """
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-destroy")
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def add_port_to_if_grp(self, port):
        """
        adds port to a ifgrp
        """
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-add-port")
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("port", port)
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error adding port %s to if_group %s: %s' %
                                      (port, self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_ports(self, current_ports):
        """Reconcile the ifgrp's ports with the desired 'ports' list."""
        add_ports = set(self.parameters['ports']) - set(current_ports)
        remove_ports = set(current_ports) - set(self.parameters['ports'])
        for port in add_ports:
            self.add_port_to_if_grp(port)
        for port in remove_ports:
            self.remove_port_to_if_grp(port)

    def remove_port_to_if_grp(self, port):
        """
        removes port from a ifgrp
        """
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-remove-port")
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("port", port)
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # message fixed: this is a removal, not an addition
            self.module.fail_json(msg='Error removing port %s from if_group %s: %s' %
                                      (port, self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def autosupport_log(self):
        """Send an EMS autosupport log event for this module via the admin vserver."""
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_net_ifgrp", cserver)

    def apply(self):
        """Decide create/delete/modify from current vs desired state and execute it."""
        self.autosupport_log()
        current, modify = self.get_if_grp(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # current_ports is only fetched when the ifgrp already exists; the
        # modify branch below can only be reached on that same path.
        if cd_action is None and self.parameters['state'] == 'present':
            current_ports = self.get_if_grp_ports()
            modify = self.na_helper.get_modified_attributes(current_ports, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.create_if_grp()
                elif cd_action == 'delete':
                    self.delete_if_grp()
                elif modify:
                    self.modify_ports(current_ports['ports'])
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Entry point: create the ifgrp module object and run the requested play task."""
    ifgrp = NetAppOntapIfGrp()
    ifgrp.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py
new file mode 100644
index 00000000..27b10174
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_net_port
+short_description: NetApp ONTAP network ports.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify a ONTAP network port.
+options:
+ state:
+ description:
+ - Whether the specified net port should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+ node:
+ description:
+ - Specifies the name of node.
+ required: true
+ type: str
+ ports:
+ aliases:
+ - port
+ description:
+ - Specifies the name of port(s).
+ required: true
+ type: list
+ elements: str
+ mtu:
+ description:
+ - Specifies the maximum transmission unit (MTU) reported by the port.
+ type: str
+ autonegotiate_admin:
+ description:
+ - Enables or disables Ethernet auto-negotiation of speed,
+ duplex and flow control.
+ type: str
+ duplex_admin:
+ description:
+ - Specifies the user preferred duplex setting of the port.
+ - Valid values auto, half, full
+ type: str
+ speed_admin:
+ description:
+ - Specifies the user preferred speed setting of the port.
+ type: str
+ flowcontrol_admin:
+ description:
+ - Specifies the user preferred flow control setting of the port.
+ type: str
+ ipspace:
+ description:
+ - Specifies the port's associated IPspace name.
+ - The 'Cluster' ipspace is reserved for cluster ports.
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Modify Net Port
+ na_ontap_net_port:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ node: "{{ node_name }}"
+ ports: e0d,e0c
+ autonegotiate_admin: true
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNetPort(object):
+    """
+    Modify one or more ONTAP network ports via ZAPI (net-port-modify).
+
+    Only 'present' is supported: ports are never created or deleted, only
+    their administrative attributes are changed.
+    """
+
+    def __init__(self):
+        """
+        Initialize the Ontap Net Port class: build the argument spec,
+        parse parameters and open the ZAPI connection.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present'], default='present'),
+            node=dict(required=True, type="str"),
+            ports=dict(required=True, type='list', elements='str', aliases=['port']),
+            mtu=dict(required=False, type="str", default=None),
+            autonegotiate_admin=dict(required=False, type="str", default=None),
+            duplex_admin=dict(required=False, type="str", default=None),
+            speed_admin=dict(required=False, type="str", default=None),
+            flowcontrol_admin=dict(required=False, type="str", default=None),
+            ipspace=dict(required=False, type="str", default=None),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.set_playbook_zapi_key_map()
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        return
+
+    def set_playbook_zapi_key_map(self):
+        # map playbook option names to their ZAPI net-port-info field names;
+        # all of these options are string-typed on the ZAPI side
+        self.na_helper.zapi_string_keys = {
+            'mtu': 'mtu',
+            'autonegotiate_admin': 'is-administrative-auto-negotiate',
+            'duplex_admin': 'administrative-duplex',
+            'speed_admin': 'administrative-speed',
+            'flowcontrol_admin': 'administrative-flowcontrol',
+            'ipspace': 'ipspace'
+        }
+
+    def get_net_port(self, port):
+        """
+        Return details about the net port
+        :param: port: Name of the port
+        :return: Dictionary with current state of the port. None if not found.
+        :rtype: dict
+        """
+        net_port_get = netapp_utils.zapi.NaElement('net-port-get-iter')
+        # query by (node, port): the pair uniquely identifies a port
+        attributes = {
+            'query': {
+                'net-port-info': {
+                    'node': self.parameters['node'],
+                    'port': port
+                }
+            }
+        }
+        net_port_get.translate_struct(attributes)
+
+        try:
+            result = self.server.invoke_successfully(net_port_get, True)
+            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+                port_info = result['attributes-list']['net-port-info']
+                port_details = dict()
+            else:
+                # no matching record: port does not exist on this node
+                return None
+        except netapp_utils.zapi.NaApiError as error:
+            # fail_json exits, so port_info/port_details are always bound below
+            self.module.fail_json(msg='Error getting net ports for %s: %s' % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+        # collect only the attributes this module knows how to modify
+        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+            port_details[item_key] = port_info.get_child_content(zapi_key)
+        return port_details
+
+    def modify_net_port(self, port, modify):
+        """
+        Modify a port
+
+        :param port: Name of the port
+        :param modify: dict with attributes to be modified
+        :return: None
+        """
+        port_modify = netapp_utils.zapi.NaElement('net-port-modify')
+        port_attributes = {'node': self.parameters['node'],
+                           'port': port}
+        # translate playbook keys back into ZAPI field names
+        for key in modify:
+            if key in self.na_helper.zapi_string_keys:
+                zapi_key = self.na_helper.zapi_string_keys.get(key)
+                port_attributes[zapi_key] = modify[key]
+        port_modify.translate_struct(port_attributes)
+        try:
+            self.server.invoke_successfully(port_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying net ports for %s: %s' % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        """
+        AutoSupport log for na_ontap_net_port
+        :return: None
+        """
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event("na_ontap_net_port", cserver)
+
+    def apply(self):
+        """
+        Run Module based on play book
+        """
+
+        self.autosupport_log()
+        # Run the task for all ports in the list of 'ports'.
+        # na_helper.changed accumulates across ports: if any port needs a
+        # change, the module reports changed=True.
+        for port in self.parameters['ports']:
+            current = self.get_net_port(port)
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+            if self.na_helper.changed:
+                if self.module.check_mode:
+                    pass
+                else:
+                    if modify:
+                        self.modify_net_port(port, modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create the NetApp Ontap Net Port Object and modify it
+ """
+ obj = NetAppOntapNetPort()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py
new file mode 100644
index 00000000..58eed34e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_net_routes
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_net_routes
+short_description: NetApp ONTAP network routes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify ONTAP network routes.
+options:
+ state:
+ description:
+ - Whether you want to create or delete a network route.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ vserver:
+ description:
+ - The name of the vserver.
+ required: true
+ type: str
+ destination:
+ description:
+ - Specify the route destination.
+ - Example 10.7.125.5/20, fd20:13::/64.
+ required: true
+ type: str
+ gateway:
+ description:
+ - Specify the route gateway.
+ - Example 10.7.125.1, fd20:13::1.
+ required: true
+ type: str
+ metric:
+ description:
+ - Specify the route metric.
+ - If this field is not provided the default will be set to 20.
+ type: int
+ from_destination:
+ description:
+ - Specify the route destination that should be changed.
+ - new_destination was removed to fix idempotency issues. To rename destination the original goes to from_destination and the new goes to destination.
+ version_added: 2.8.0
+ type: str
+ from_gateway:
+ description:
+ - Specify the route gateway that should be changed.
+ version_added: 2.8.0
+ type: str
+ from_metric:
+ description:
+ - Specify the route metric that should be changed.
+ version_added: 2.8.0
+ type: int
+'''
+
+EXAMPLES = """
+ - name: create route
+ na_ontap_net_routes:
+ state: present
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ destination: 10.7.125.5/20
+ gateway: 10.7.125.1
+ metric: 30
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNetRoutes(object):
+    """
+    Create, Modifies and Destroys a Net Route.
+
+    Uses the ONTAP REST API when available; falls back to ZAPI otherwise.
+    The 'metric' / 'from_metric' options force the ZAPI code path because
+    they are not supported by the earlier REST implementation.
+    """
+
+    def __init__(self):
+        """
+        Initialize the Ontap Net Route class
+        """
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            destination=dict(required=True, type='str'),
+            gateway=dict(required=True, type='str'),
+            metric=dict(required=False, type='int'),
+            from_destination=dict(required=False, type='str', default=None),
+            from_gateway=dict(required=False, type='str', default=None),
+            from_metric=dict(required=False, type='int', default=None),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        # some attributes are not supported in earlier REST implementation;
+        # if the user supplied any of them, is_rest() will select ZAPI
+        unsupported_rest_properties = ['metric', 'from_metric']
+        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+        self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+
+        if error is not None:
+            self.module.fail_json(msg=error)
+
+        if not self.use_rest:
+            if HAS_NETAPP_LIB is False:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+        return
+
+    def create_net_route(self, current_metric=None):
+        """
+        Creates a new Route.
+
+        :param current_metric: metric to reuse when re-creating an existing
+            route (ZAPI path only); takes precedence over self.parameters['metric']
+        """
+        if self.use_rest:
+            api = "network/ip/routes"
+            params = {'gateway': self.parameters['gateway'],
+                      'svm': self.parameters['vserver']}
+            if self.parameters.get('destination') is not None:
+                # REST expects destination split into address + netmask
+                dest = self.parameters['destination'].split('/')
+                params['destination'] = {'address': dest[0], 'netmask': dest[1]}
+            __, error = self.rest_api.post(api, params)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            route_obj = netapp_utils.zapi.NaElement('net-routes-create')
+            route_obj.add_new_child("destination", self.parameters['destination'])
+            route_obj.add_new_child("gateway", self.parameters['gateway'])
+            if current_metric is None and self.parameters.get('metric') is not None:
+                metric = self.parameters['metric']
+            else:
+                metric = current_metric
+            # metric can legitimately be None; ZAPI cannot accept a None child,
+            # so only add the element when a value is known
+            if metric is not None:
+                route_obj.add_new_child("metric", str(metric))
+            try:
+                self.server.invoke_successfully(route_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error creating net route: %s' % (to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def delete_net_route(self, params):
+        """
+        Deletes a given Route.
+
+        :param params: REST path requires a dict with 'uuid';
+            ZAPI path uses params['destination'/'gateway'], falling back to
+            self.parameters when params is None.
+        """
+        if self.use_rest:
+            uuid = params['uuid']
+            api = "network/ip/routes/" + uuid
+            dummy, error = self.rest_api.delete(api)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            route_obj = netapp_utils.zapi.NaElement('net-routes-destroy')
+            if params is None:
+                params = self.parameters
+            route_obj.add_new_child("destination", params['destination'])
+            route_obj.add_new_child("gateway", params['gateway'])
+            try:
+                self.server.invoke_successfully(route_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error deleting net route: %s'
+                                          % (to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def modify_net_route(self, current, desired):
+        """
+        Modify a net route.
+        Since we cannot modify a route, we are deleting the existing route, and creating a new one.
+        """
+        if self.use_rest:
+            # compare desired destination/gateway against current and fill in
+            # self.parameters so create_net_route() re-creates the right route
+            if desired.get('destination') is not None:
+                dest = desired['destination'].split('/')
+                if dest[0] != current['destination']['address'] or dest[1] != current['destination']['netmask']:
+                    self.na_helper.changed = True
+                self.parameters['destination'] = desired['destination']
+            else:
+                self.parameters['destination'] = '%s/%s' % (current['destination']['address'],
+                                                            current['destination']['netmask'])
+            if desired.get('gateway') is not None:
+                if desired['gateway'] != current['gateway']:
+                    self.na_helper.changed = True
+                self.parameters['gateway'] = desired['gateway']
+            else:
+                self.parameters['gateway'] = current['gateway']
+            if not self.na_helper.changed or self.module.check_mode:
+                return
+            # re-fetch the current record to obtain its uuid for deletion
+            params = {'destination': '%s/%s' % (current['destination']['address'], current['destination']['netmask']),
+                      'gateway': current['gateway']}
+            target = self.get_net_route(params)
+            self.delete_net_route(target)
+            self.create_net_route()
+            return
+
+        else:
+            # return if there is nothing to change
+            for key, val in desired.items():
+                if val != current[key]:
+                    self.na_helper.changed = True
+                    break
+            if not self.na_helper.changed or self.module.check_mode:
+                return
+            # delete and re-create with new params
+            self.delete_net_route(current)
+            route_obj = netapp_utils.zapi.NaElement('net-routes-create')
+            for attribute in ['metric', 'destination', 'gateway']:
+                if desired.get(attribute) is not None:
+                    value = desired[attribute]
+                else:
+                    value = current[attribute]
+                route_obj.add_new_child(attribute, str(value))
+            try:
+                self.server.invoke_successfully(route_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                # restore the old route, create the route with the existing metric
+                self.create_net_route(current['metric'])
+                # return if desired route already exists (error 13001: duplicate entry)
+                if to_native(error.code) == '13001':
+                    return
+                # Invalid value specified for any of the attributes
+                self.module.fail_json(msg='Error modifying net route: %s'
+                                          % (to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def get_net_route(self, params=None):
+        """
+        Checks to see if a route exist or not.
+        :param params: optional dict of destination/gateway to look up;
+            missing keys fall back to self.parameters
+        :return: route record (REST dict or ZAPI-derived dict) if it exists, None otherwise
+        """
+        if params is not None:
+            # we need either destination or gateway to fetch desired route
+            if params.get('destination') is None and params.get('gateway') is None:
+                return None
+        if self.use_rest:
+            api = "network/ip/routes"
+            data = {'fields': 'destination,gateway,svm'}
+            message, error = self.rest_api.get(api, data)
+            if error:
+                self.module.fail_json(msg=error)
+            if len(message.keys()) == 0:
+                return None
+            elif 'records' in message and len(message['records']) == 0:
+                return None
+            elif 'records' not in message:
+                error = "Unexpected response in get_net_route from %s: %s" % (api, repr(message))
+                self.module.fail_json(msg=error)
+            if params is None:
+                params = self.parameters
+            else:
+                if params.get('destination') is None:
+                    params['destination'] = self.parameters['destination']
+                if params.get('gateway') is None:
+                    params['gateway'] = self.parameters['gateway']
+                params['vserver'] = self.parameters['vserver']
+            # scan all records for a (gateway, destination address, svm) match
+            for record in message['records']:
+                if record['gateway'] == params['gateway'] and \
+                        record['destination']['address'] == params['destination'].split('/')[0] and \
+                        record.get('svm') and record['svm']['name'] == params['vserver']:
+                    return record
+            return None
+        else:
+            current = None
+            route_obj = netapp_utils.zapi.NaElement('net-routes-get')
+            for attr in ['destination', 'gateway']:
+                if params and params.get(attr) is not None:
+                    value = params[attr]
+                else:
+                    value = self.parameters[attr]
+                route_obj.add_new_child(attr, value)
+            try:
+                result = self.server.invoke_successfully(route_obj, True)
+                if result.get_child_by_name('attributes') is not None:
+                    route_info = result.get_child_by_name('attributes').get_child_by_name('net-vs-routes-info')
+                    current = {
+                        'destination': route_info.get_child_content('destination'),
+                        'gateway': route_info.get_child_content('gateway'),
+                        'metric': int(route_info.get_child_content('metric'))
+                    }
+
+            except netapp_utils.zapi.NaApiError as error:
+                # Error 15661 denotes a route doesn't exist.
+                if to_native(error.code) == "15661":
+                    return None
+                self.module.fail_json(msg='Error fetching net route: %s'
+                                          % (to_native(error)),
+                                      exception=traceback.format_exc())
+            return current
+
+    @staticmethod
+    def is_modify_action(current, desired):
+        """
+        Get desired action to be applied for net routes
+        Destination and gateway are unique params for a route and cannot be duplicated
+        So if a route with desired destination or gateway exists already, we don't try to modify
+        :param current: current details
+        :param desired: desired details
+        :return: True (modify), False (idempotency/duplication - nothing to do),
+            or None (invalid: neither route exists)
+        """
+        if current is None and desired is None:
+            # this is invalid
+            # cannot modify a non existent resource
+            return None
+        if current is None and desired is not None:
+            # idempotency or duplication
+            # we need not create
+            return False
+        if current is not None and desired is not None:
+            # we can't modify an ambiguous route (idempotency/duplication)
+            return False
+        return True
+
+    def get_params_to_be_modified(self, current):
+        """
+        Get parameters and values that need to be modified
+        :param current: current details
+        :return: dict(), None
+
+        NOTE(review): this reads 'new_destination'/'new_gateway'/'new_metric',
+        but those options are not declared in the argument_spec (the docs say
+        new_destination was replaced by from_*). This looks like dead code
+        kept for compatibility - confirm before relying on it.
+        """
+        if current is None:
+            return None
+        desired = dict()
+        if self.parameters.get('new_destination') is not None and \
+                self.parameters['new_destination'] != current['destination']:
+            desired['destination'] = self.parameters['new_destination']
+        if self.parameters.get('new_gateway') is not None and \
+                self.parameters['new_gateway'] != current['gateway']:
+            desired['gateway'] = self.parameters['new_gateway']
+        if self.parameters.get('new_metric') is not None and \
+                self.parameters['new_metric'] != current['metric']:
+            desired['metric'] = self.parameters['new_metric']
+        return desired
+
+    def apply(self):
+        """
+        Run Module based on play book
+        """
+        if not self.use_rest:
+            netapp_utils.ems_log_event("na_ontap_net_routes", self.server)
+        current = self.get_net_route()
+        modify, cd_action = None, None
+        if self.use_rest:
+            modify_params = {'gateway': self.parameters.get('from_gateway'),
+                             'destination': self.parameters.get('from_destination')}
+            if any(modify_params.values()):
+                # destination and gateway combination is unique, and is considered like a id. so modify destination
+                # or gateway is considered a rename action.
+                old_params = self.get_net_route(modify_params)
+                modify = self.na_helper.is_rename_action(old_params, current)
+                if modify is None:
+                    self.module.fail_json(msg="Error modifying: route %s does not exist" % self.parameters['from_destination'])
+            else:
+                cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        else:
+            modify_params = {'destination': self.parameters.get('from_destination'),
+                             'gateway': self.parameters.get('from_gateway'),
+                             'metric': self.parameters.get('from_metric')}
+            # if any from_* param is present in playbook, check for modify action
+            if any(modify_params.values()):
+                # destination and gateway combination is unique, and is considered like a id. so modify destination
+                # or gateway is considered a rename action. metric is considered an attribute of the route so it is
+                # considered as modify.
+                if modify_params.get('metric') is not None:
+                    modify = True
+                    old_params = current
+                else:
+                    # get parameters that are eligible for modify
+                    old_params = self.get_net_route(modify_params)
+                    modify = self.na_helper.is_rename_action(old_params, current)
+                if modify is None:
+                    self.module.fail_json(msg="Error modifying: route %s does not exist" % self.parameters['from_destination'])
+            else:
+                cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action == 'create':
+            if not self.module.check_mode:
+                self.create_net_route()
+        elif cd_action == 'delete':
+            if not self.module.check_mode:
+                self.delete_net_route(current)
+        elif modify:
+            # desired = old route attributes overridden by the from_* keys the
+            # user actually supplied
+            desired = {}
+            for key, value in old_params.items():
+                desired[key] = value
+            for key, value in modify_params.items():
+                if value is not None:
+                    desired[key] = self.parameters.get(key)
+            self.modify_net_route(old_params, desired)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap Net Route object and runs the correct play task
+ """
+ obj = NetAppOntapNetRoutes()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py
new file mode 100644
index 00000000..bd52ef34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_net_subnet
+short_description: NetApp ONTAP Create, delete, modify network subnets.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: Storage Engineering (@Albinpopote) <ansible@black-perl.fr>
+description:
+- Create, modify, destroy the network subnet
+options:
+ state:
+ description:
+ - Whether the specified network interface group should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ broadcast_domain:
+ description:
+ - Specify the required broadcast_domain name for the subnet.
+ - A broadcast domain can not be modified after the subnet has been created
+ type: str
+
+ name:
+ description:
+ - Specify the subnet name.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the subnet to be renamed
+ type: str
+
+ gateway:
+ description:
+ - Specify the gateway for the default route of the subnet.
+ type: str
+
+ ipspace:
+ description:
+ - Specify the ipspace for the subnet.
+ - The default value for this parameter is the default IPspace, named 'Default'.
+ type: str
+
+ ip_ranges:
+ description:
+ - Specify the list of IP address ranges associated with the subnet.
+ type: list
+ elements: str
+
+ subnet:
+ description:
+ - Specify the subnet (ip and mask).
+ type: str
+"""
+
+EXAMPLES = """
+ - name: create subnet
+ na_ontap_net_subnet:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ subnet: 10.10.10.0/24
+ name: subnet-adm
+ ip_ranges: [ '10.10.10.30-10.10.10.40', '10.10.10.51' ]
+ gateway: 10.10.10.254
+ ipspace: Default
+ broadcast_domain: Default
+ - name: delete subnet
+ na_ontap_net_subnet:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: subnet-adm
+ ipspace: Default
+ - name: rename subnet
+ na_ontap_net_subnet:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: subnet-adm-new
+ from_name: subnet-adm
+ ipspace: Default
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSubnet(object):
+    """
+    Create, Modifies and Destroys a subnet via ZAPI (net-subnet-*).
+    """
+    def __init__(self):
+        """
+        Initialize the ONTAP Subnet class: build the argument spec,
+        parse parameters and open the ZAPI connection.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+            from_name=dict(required=False, type='str'),
+            broadcast_domain=dict(required=False, type='str'),
+            gateway=dict(required=False, type='str'),
+            ip_ranges=dict(required=False, type='list', elements='str'),
+            ipspace=dict(required=False, type='str'),
+            subnet=dict(required=False, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        return
+
+    def get_subnet(self, name=None):
+        """
+        Return details about the subnet
+        :param:
+            name : Name of the subnet (defaults to self.parameters['name'])
+        :return: Details about the subnet. None if not found.
+        :rtype: dict
+
+        NOTE(review): unlike the other ZAPI calls in this class, this invoke
+        is not wrapped in try/except NaApiError; API failures surface as raw
+        exceptions.
+        """
+        if name is None:
+            name = self.parameters.get('name')
+
+        subnet_iter = netapp_utils.zapi.NaElement('net-subnet-get-iter')
+        subnet_info = netapp_utils.zapi.NaElement('net-subnet-info')
+        subnet_info.add_new_child('subnet-name', name)
+
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(subnet_info)
+
+        subnet_iter.add_child_elem(query)
+
+        result = self.server.invoke_successfully(subnet_iter, True)
+        return_value = None
+        # check if query returns the expected subnet
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) == 1:
+
+            subnet_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-subnet-info')
+            broadcast_domain = subnet_attributes.get_child_content('broadcast-domain')
+            gateway = subnet_attributes.get_child_content('gateway')
+            ipspace = subnet_attributes.get_child_content('ipspace')
+            subnet = subnet_attributes.get_child_content('subnet')
+            name = subnet_attributes.get_child_content('subnet-name')
+
+            # flatten the ip-ranges child elements into a plain list of strings
+            ip_ranges = []
+            if subnet_attributes.get_child_by_name('ip-ranges'):
+                range_obj = subnet_attributes.get_child_by_name('ip-ranges').get_children()
+                for elem in range_obj:
+                    ip_ranges.append(elem.get_content())
+
+            return_value = {
+                'name': name,
+                'broadcast_domain': broadcast_domain,
+                'gateway': gateway,
+                'ip_ranges': ip_ranges,
+                'ipspace': ipspace,
+                'subnet': subnet
+            }
+
+        return return_value
+
+    def create_subnet(self):
+        """
+        Creates a new subnet (net-subnet-create); name, broadcast_domain and
+        subnet are mandatory, gateway/ip_ranges/ipspace are optional.
+        """
+        options = {'subnet-name': self.parameters.get('name'),
+                   'broadcast-domain': self.parameters.get('broadcast_domain'),
+                   'subnet': self.parameters.get('subnet')}
+        subnet_create = netapp_utils.zapi.NaElement.create_node_with_children(
+            'net-subnet-create', **options)
+
+        if self.parameters.get('gateway'):
+            subnet_create.add_new_child('gateway', self.parameters.get('gateway'))
+        if self.parameters.get('ip_ranges'):
+            subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
+            subnet_create.add_child_elem(subnet_ips)
+            for ip_range in self.parameters.get('ip_ranges'):
+                subnet_ips.add_new_child('ip-range', ip_range)
+        if self.parameters.get('ipspace'):
+            subnet_create.add_new_child('ipspace', self.parameters.get('ipspace'))
+
+        try:
+            self.server.invoke_successfully(subnet_create, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_subnet(self):
+        """
+        Deletes a subnet (net-subnet-destroy), identified by name.
+        """
+        subnet_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'net-subnet-destroy', **{'subnet-name': self.parameters.get('name')})
+
+        try:
+            self.server.invoke_successfully(subnet_delete, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_subnet(self):
+        """
+        Modifies a subnet (net-subnet-modify): gateway, ip_ranges, ipspace
+        and subnet can be changed; broadcast_domain cannot (rejected in apply()).
+        """
+        options = {'subnet-name': self.parameters.get('name')}
+
+        subnet_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+            'net-subnet-modify', **options)
+
+        if self.parameters.get('gateway'):
+            subnet_modify.add_new_child('gateway', self.parameters.get('gateway'))
+        if self.parameters.get('ip_ranges'):
+            subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
+            subnet_modify.add_child_elem(subnet_ips)
+            for ip_range in self.parameters.get('ip_ranges'):
+                subnet_ips.add_new_child('ip-range', ip_range)
+        if self.parameters.get('ipspace'):
+            subnet_modify.add_new_child('ipspace', self.parameters.get('ipspace'))
+        if self.parameters.get('subnet'):
+            subnet_modify.add_new_child('subnet', self.parameters.get('subnet'))
+
+        try:
+            self.server.invoke_successfully(subnet_modify, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def rename_subnet(self):
+        """
+        Renames a subnet (net-subnet-rename) from 'from_name' to 'name',
+        optionally scoped to the given ipspace.
+        """
+        options = {'subnet-name': self.parameters.get('from_name'),
+                   'new-name': self.parameters.get('name')}
+
+        subnet_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+            'net-subnet-rename', **options)
+
+        if self.parameters.get('ipspace'):
+            subnet_rename.add_new_child('ipspace', self.parameters.get('ipspace'))
+
+        try:
+            self.server.invoke_successfully(subnet_rename, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error renaming subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        '''Apply action to subnet: rename, create, delete or modify.'''
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event("na_ontap_net_subnet", cserver)
+        current = self.get_subnet()
+        cd_action, rename = None, None
+
+        if self.parameters.get('from_name'):
+            # from_name given: treat as a rename if the old subnet still exists
+            rename = self.na_helper.is_rename_action(self.get_subnet(self.parameters.get('from_name')), current)
+            if rename is None:
+                self.module.fail_json(msg="Error renaming: subnet %s does not exist" %
+                                      self.parameters.get('from_name'))
+        else:
+            cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        # broadcast_domain is immutable once the subnet exists
+        for attribute in modify:
+            if attribute in ['broadcast_domain']:
+                self.module.fail_json(msg='Error modifying subnet %s: cannot modify broadcast_domain parameter.' % self.parameters.get('name'))
+
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if rename:
+                    self.rename_subnet()
+                # If rename is True, cd_action is None but modify could still be True
+                if cd_action == 'create':
+                    # subnet and broadcast_domain are mandatory for creation
+                    for attribute in ['subnet', 'broadcast_domain']:
+                        if not self.parameters.get(attribute):
+                            self.module.fail_json(msg='Error - missing required arguments: %s.' % attribute)
+                    self.create_subnet()
+                elif cd_action == 'delete':
+                    self.delete_subnet()
+                elif modify:
+                    self.modify_subnet()
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp ONTAP Net Route object and runs the correct play task
+ """
+ subnet_obj = NetAppOntapSubnet()
+ subnet_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py
new file mode 100644
index 00000000..fe6e9e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_net_vlan
+short_description: NetApp ONTAP network VLAN
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create or Delete a network VLAN
+options:
+ state:
+ description:
+ - Whether the specified network VLAN should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ parent_interface:
+ description:
+ - The interface that hosts the VLAN interface.
+ required: true
+ type: str
+ vlanid:
+ description:
+ - The VLAN id. Ranges from 1 to 4094.
+ required: true
+ type: str
+ node:
+ description:
+ - Node name of VLAN interface.
+ required: true
+ type: str
+notes:
+ - The C(interface_name) option has been removed and should be deleted from playbooks
+'''
+
+EXAMPLES = """
+ - name: create VLAN
+ na_ontap_net_vlan:
+ state: present
+ vlanid: 13
+ node: "{{ vlan node }}"
+ parent_interface: "{{ vlan parent interface name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapVlan(object):
    """
    Create and destroy network VLAN interfaces.
    """
    def __init__(self):
        """
        Initialize the module: build the argument spec, read the play
        parameters, and open a cluster-scoped ZAPI connection.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            parent_interface=dict(required=True, type='str'),
            vlanid=dict(required=True, type='str'),
            node=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.parent_interface = p['parent_interface']
        self.vlanid = p['vlanid']
        self.node = p['node']
        # ZAPI identifies a VLAN interface as '<parent_interface>-<vlanid>'
        self.interface_name = str(p['parent_interface']) + '-' + str(self.vlanid)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
        return

    def create_vlan(self):
        """
        Create a new VLAN interface via the net-vlan-create ZAPI.
        """
        vlan_obj = netapp_utils.zapi.NaElement("net-vlan-create")
        vlan_info = self.create_vlan_info()

        vlan_obj.add_child_elem(vlan_info)
        self.server.invoke_successfully(vlan_obj, True)

    def delete_vlan(self):
        """
        Delete a VLAN interface via the net-vlan-delete ZAPI.
        """
        vlan_obj = netapp_utils.zapi.NaElement("net-vlan-delete")
        vlan_info = self.create_vlan_info()

        vlan_obj.add_child_elem(vlan_info)
        self.server.invoke_successfully(vlan_obj, True)

    def does_vlan_exist(self):
        """
        Check whether the VLAN interface already exists on the node.
        :return: True if the VLAN exists, False if it doesn't.
        """
        vlan_obj = netapp_utils.zapi.NaElement("net-vlan-get")
        vlan_obj.add_new_child("interface-name", self.interface_name)
        vlan_obj.add_new_child("node", self.node)
        try:
            result = self.server.invoke_successfully(vlan_obj, True)
            result.get_child_by_name("attributes").get_child_by_name("vlan-info").get_child_by_name("interface-name")
        except netapp_utils.zapi.NaApiError:
            # NOTE(review): every NaApiError is treated as 'VLAN not found';
            # this may mask unrelated API failures (e.g. permission errors).
            return False
        return True

    def create_vlan_info(self):
        """
        Build the vlan-info element shared by the create and delete requests.
        :return: a vlan-info NaElement carrying parent-interface, vlanid and node.
        """
        vlan_info = netapp_utils.zapi.NaElement("vlan-info")

        # set up the vlan_info object:
        vlan_info.add_new_child("parent-interface", self.parent_interface)
        vlan_info.add_new_child("vlanid", self.vlanid)
        vlan_info.add_new_child("node", self.node)
        return vlan_info

    def apply(self):
        """
        Compare desired and current state, then create or delete the VLAN
        as needed (no-op in check mode).
        :return: None; exits the module via exit_json.
        """
        changed = False
        result = None
        # EMS logging needs a connection scoped to the cluster admin vserver
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_net_vlan", cserver)
        existing_vlan = self.does_vlan_exist()
        if existing_vlan:
            if self.state == 'absent':  # delete
                changed = True
        else:
            if self.state == 'present':  # create
                changed = True
        if changed:
            if self.module.check_mode:
                pass
            else:
                if self.state == 'present':
                    self.create_vlan()
                elif self.state == 'absent':
                    self.delete_vlan()
        self.module.exit_json(changed=changed, meta=result)
+
+
def main():
    """
    Entry point: build the NetApp ONTAP VLAN object and apply the play task.
    """
    vlan = NetAppOntapVlan()
    vlan.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py
new file mode 100644
index 00000000..c439cba1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py
@@ -0,0 +1,599 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+module: na_ontap_nfs
+short_description: NetApp ONTAP NFS status
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Enable or disable NFS on ONTAP
+options:
+ state:
+ description:
+ - Whether NFS should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ service_state:
+ description:
+      - Whether the specified NFS should be enabled or disabled. Creates the NFS service if it does not exist.
+ choices: ['started', 'stopped']
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ nfsv3:
+ description:
+ - status of NFSv3.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv3_fsid_change:
+ description:
+ - status of if NFSv3 clients see change in FSID as they traverse filesystems.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv4_fsid_change:
+ description:
+ - status of if NFSv4 clients see change in FSID as they traverse filesystems.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv4:
+ description:
+ - status of NFSv4.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv41:
+ description:
+ - status of NFSv41.
+ aliases: ['nfsv4.1']
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv41_pnfs:
+ description:
+ - status of NFSv41 pNFS.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv4_numeric_ids:
+ description:
+ - status of NFSv4 numeric ID's.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ vstorage_state:
+ description:
+ - status of vstorage_state.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv4_id_domain:
+ description:
+ - Name of the nfsv4_id_domain to use.
+ type: str
+ nfsv40_acl:
+ description:
+ - status of NFS v4.0 ACL feature
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_read_delegation:
+ description:
+ - status for NFS v4.0 read delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_write_delegation:
+ description:
+ - status for NFS v4.0 write delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_acl:
+ description:
+ - status of NFS v4.1 ACL feature
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_read_delegation:
+ description:
+ - status for NFS v4.1 read delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_write_delegation:
+ description:
+ - status for NFS v4.1 write delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_referrals:
+ description:
+ - status for NFS v4.0 referrals.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv41_referrals:
+ description:
+ - status for NFS v4.1 referrals.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ tcp:
+ description:
+ - Enable TCP (support from ONTAP 9.3 onward).
+ choices: ['enabled', 'disabled']
+ type: str
+ udp:
+ description:
+ - Enable UDP (support from ONTAP 9.3 onward).
+ choices: ['enabled', 'disabled']
+ type: str
+ showmount:
+ description:
+ - Whether SVM allows showmount
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ tcp_max_xfer_size:
+ description:
+ - TCP Maximum Transfer Size (bytes). The default value is 65536.
+ version_added: 2.8.0
+ type: int
+
+"""
+
+EXAMPLES = """
+ - name: change nfs status
+ na_ontap_nfs:
+ state: present
+ service_state: stopped
+ vserver: vs_hack
+ nfsv3: disabled
+ nfsv4: disabled
+ nfsv41: enabled
+ tcp: disabled
+ udp: disabled
+ vstorage_state: disabled
+ nfsv4_id_domain: example.com
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPNFS(object):
    """Enable, disable, configure, or delete the NFS service of a vserver."""

    # Maps every module option that takes 'enabled'/'disabled' to the ZAPI
    # element name used by nfs-service-get-iter and nfs-service-modify.
    # This table replaces three parallel, hand-maintained if/elif chains
    # (get/compare/modify) that previously had to be kept in sync by hand.
    # The insertion order deliberately mirrors the order of the children
    # the old modify code added to the nfs-service-modify request.
    BOOL_OPTIONS = {
        'nfsv3': 'is-nfsv3-enabled',
        'nfsv3_fsid_change': 'is-nfsv3-fsid-change-enabled',
        'nfsv4_fsid_change': 'is-nfsv4-fsid-change-enabled',
        'nfsv4': 'is-nfsv40-enabled',
        'nfsv41': 'is-nfsv41-enabled',
        'vstorage_state': 'is-vstorage-enabled',
        'tcp': 'is-tcp-enabled',
        'udp': 'is-udp-enabled',
        'nfsv40_acl': 'is-nfsv40-acl-enabled',
        'nfsv40_read_delegation': 'is-nfsv40-read-delegation-enabled',
        'nfsv40_referrals': 'is-nfsv40-referrals-enabled',
        'nfsv40_write_delegation': 'is-nfsv40-write-delegation-enabled',
        'nfsv41_acl': 'is-nfsv41-acl-enabled',
        'nfsv41_read_delegation': 'is-nfsv41-read-delegation-enabled',
        'nfsv41_referrals': 'is-nfsv41-referrals-enabled',
        'nfsv41_write_delegation': 'is-nfsv41-write-delegation-enabled',
        'nfsv41_pnfs': 'is-nfsv41-pnfs-enabled',
        'nfsv4_numeric_ids': 'is-nfsv4-numeric-ids-enabled',
        'showmount': 'showmount',
    }

    @staticmethod
    def _details_key(zapi_key):
        """Return the get_nfs_service() dict key for a ZAPI element name."""
        # 'showmount' is the one ZAPI name not following the
        # is-<feature>-enabled pattern; keep its historical dict key.
        return 'is_showmount_enabled' if zapi_key == 'showmount' else zapi_key.replace('-', '_')

    def __init__(self):
        """Build the argument spec, read parameters, connect to the vserver."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            service_state=dict(required=False, type='str', choices=['started', 'stopped']),
            vserver=dict(required=True, type='str'),
            nfsv3=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv3_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv4_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv4=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv41=dict(required=False, type='str', default=None, choices=['enabled', 'disabled'], aliases=['nfsv4.1']),
            nfsv41_pnfs=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv4_numeric_ids=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            vstorage_state=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            tcp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
            udp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
            nfsv4_id_domain=dict(required=False, type='str', default=None),
            nfsv40_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv40_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv40_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv40_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv41_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv41_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv41_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            nfsv41_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
            showmount=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
            tcp_max_xfer_size=dict(required=False, default=None, type='int')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        parameters = self.module.params

        # Copy every option value onto the instance (self.state, self.nfsv3,
        # self.tcp, ...), exactly like the previous hand-written assignments.
        for option in ['state', 'service_state', 'vserver', 'nfsv4_id_domain', 'tcp_max_xfer_size'] + list(self.BOOL_OPTIONS):
            setattr(self, option, parameters[option])

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_nfs_service(self):
        """
        Return details about the NFS service of self.vserver.

        :return: dict of current settings ('true'/'false' strings for the
            boolean features, plus 'nfsv4_id_domain' and 'tcp_max_xfer_size'),
            or None if the service does not exist.
        :rtype: dict or None
        """
        nfs_get_iter = netapp_utils.zapi.NaElement('nfs-service-get-iter')
        nfs_info = netapp_utils.zapi.NaElement('nfs-info')
        nfs_info.add_new_child('vserver', self.vserver)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(nfs_info)
        nfs_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(nfs_get_iter, True)
        # no record means the NFS service has not been created on this vserver
        if not (result.get_child_by_name('num-records') and
                int(result.get_child_content('num-records')) >= 1):
            return None
        attributes_list = result.get_child_by_name('attributes-list').get_child_by_name('nfs-info')
        nfs_details = dict(
            (self._details_key(zapi_key), attributes_list.get_child_content(zapi_key))
            for zapi_key in self.BOOL_OPTIONS.values()
        )
        nfs_details['nfsv4_id_domain'] = attributes_list.get_child_content('nfsv4-id-domain')
        nfs_details['tcp_max_xfer_size'] = attributes_list.get_child_content('tcp-max-xfer-size')
        return nfs_details

    def get_nfs_status(self):
        """
        Return the admin status of the NFS service.

        :return: 'true' or 'false' as a string (raw ZAPI content), or None.
        """
        nfs_status = netapp_utils.zapi.NaElement('nfs-status')
        result = self.server.invoke_successfully(nfs_status, True)
        return result.get_child_content('is-enabled')

    def enable_nfs(self):
        """
        Enable NFS (online). If the NFS service was not explicitly created,
        this API will create one with default options.
        """
        nfs_enable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-enable')
        try:
            self.server.invoke_successfully(nfs_enable,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' %
                                  (self.vserver, self.service_state, to_native(error)),
                                  exception=traceback.format_exc())

    def disable_nfs(self):
        """
        Disable NFS (offline).
        """
        nfs_disable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-disable')
        try:
            self.server.invoke_successfully(nfs_disable,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' %
                                  (self.vserver, self.service_state, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_nfs(self):
        """
        Modify the NFS service: send one child element per boolean option the
        user set (data-driven; replaces the previous ~80-line if/elif chain).
        """
        nfs_modify = netapp_utils.zapi.NaElement('nfs-service-modify')
        for option, zapi_key in self.BOOL_OPTIONS.items():
            value = getattr(self, option)
            if value == 'enabled':
                nfs_modify.add_new_child(zapi_key, 'true')
            elif value == 'disabled':
                nfs_modify.add_new_child(zapi_key, 'false')
        if self.tcp_max_xfer_size is not None:
            nfs_modify.add_new_child('tcp-max-xfer-size', str(self.tcp_max_xfer_size))
        try:
            self.server.invoke_successfully(nfs_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying nfs: %s'
                                  % (to_native(error)),
                                  exception=traceback.format_exc())

    def modify_nfsv4_id_domain(self):
        """
        Modify the NFSv4 ID domain of the NFS service.
        """
        nfsv4_id_domain_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'nfs-service-modify', **{'nfsv4-id-domain': self.nfsv4_id_domain})
        if nfsv4_id_domain_modify is not None:
            try:
                self.server.invoke_successfully(nfsv4_id_domain_modify,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying nfs: %s'
                                      % (to_native(error)),
                                      exception=traceback.format_exc())

    def delete_nfs(self):
        """
        Delete the NFS service.
        """
        nfs_delete = netapp_utils.zapi.NaElement.create_node_with_children('nfs-service-destroy')
        try:
            self.server.invoke_successfully(nfs_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting nfs: %s' %
                                  (to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Apply the requested state to the NFS service of the vserver."""
        changed = False
        nfs_exists = False
        modify_nfs = False
        enable_nfs = False
        disable_nfs = False
        netapp_utils.ems_log_event("na_ontap_nfs", self.server)
        nfs_enabled = self.get_nfs_status()
        nfs_service_details = self.get_nfs_service()
        is_nfsv4_id_domain_changed = False

        def state_changed(expected, current):
            # options use 'enabled'/'disabled'; ZAPI reports 'true'/'false'
            if expected == "enabled" and current == "true":
                return False
            if expected == "disabled" and current == "false":
                return False
            return True

        def is_modify_needed():
            # closes over nfs_service_details, so it sees the refreshed
            # value after a create (see below)
            for option, zapi_key in self.BOOL_OPTIONS.items():
                desired = getattr(self, option)
                if desired is not None and state_changed(desired, nfs_service_details[self._details_key(zapi_key)]):
                    return True
            if self.tcp_max_xfer_size is not None and \
                    int(self.tcp_max_xfer_size) != int(nfs_service_details['tcp_max_xfer_size']):
                return True
            return False

        def is_domain_changed():
            return (self.nfsv4_id_domain is not None) and (self.nfsv4_id_domain != nfs_service_details['nfsv4_id_domain'])

        if nfs_service_details:
            nfs_exists = True
            if self.state == 'absent':  # delete
                changed = True
            elif self.state == 'present':  # modify
                if self.service_state == 'started' and nfs_enabled == 'false':
                    enable_nfs = True
                    changed = True
                elif self.service_state == 'stopped' and nfs_enabled == 'true':
                    disable_nfs = True
                    changed = True
                if is_modify_needed():
                    modify_nfs = True
                    changed = True
                if is_domain_changed():
                    is_nfsv4_id_domain_changed = True
                    changed = True
        else:
            if self.state == 'present':  # create
                changed = True
        if changed and not self.module.check_mode:
            if self.state == 'present':  # execute create
                if not nfs_exists:
                    self.enable_nfs()
                    # refresh: nfs-enable created the service with defaults
                    nfs_service_details = self.get_nfs_service()
                    if self.service_state == 'stopped':
                        self.disable_nfs()
                    if is_modify_needed():
                        self.modify_nfs()
                    if is_domain_changed():
                        self.modify_nfsv4_id_domain()
                else:
                    if enable_nfs:
                        self.enable_nfs()
                    elif disable_nfs:
                        self.disable_nfs()
                    if modify_nfs:
                        self.modify_nfs()
                    if is_nfsv4_id_domain_changed:
                        self.modify_nfsv4_id_domain()
            elif self.state == 'absent':  # execute delete
                self.delete_nfs()

        self.module.exit_json(changed=changed)
+
+
def main():
    """ Instantiate the NFS module object and apply the requested state """
    NetAppONTAPNFS().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py
new file mode 100644
index 00000000..89f6e98f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_node
+short_description: NetApp ONTAP Rename a node.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Rename an ONTAP node.
+options:
+ name:
+ description:
+ - The new name for the node
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - The name of the node to be renamed. If I(name) already exists, no action will be performed.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: rename node
+ na_ontap_node:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ from_name: laurentn-vsim1
+ name: laurentncluster-2
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapNode(object):
    """
    Rename an ONTAP cluster node.
    """

    def __init__(self):
        """Build the argument spec, read parameters, connect to the cluster."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            from_name=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
        return

    def rename_node(self):
        """
        Rename the node 'from_name' to 'name' via system-node-rename.
        :return: none
        """
        node_obj = netapp_utils.zapi.NaElement('system-node-rename')
        node_obj.add_new_child('node', self.parameters['from_name'])
        node_obj.add_new_child('new-name', self.parameters['name'])
        try:
            self.cluster.invoke_successfully(node_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # bug fix: the message used to say 'Error creating node' even
            # though this method performs a rename
            self.module.fail_json(msg='Error renaming node: %s' %
                                  (to_native(error)),
                                  exception=traceback.format_exc())

    def get_node(self, name):
        """
        Check whether a node named 'name' exists.
        :return: True if the node exists, None otherwise.
        """
        node_obj = netapp_utils.zapi.NaElement('system-node-get')
        node_obj.add_new_child('node', name)
        try:
            self.cluster.invoke_successfully(node_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "13115":
                # 13115 (EINVALIDINPUTERROR) if the node does not exist
                return None
            else:
                self.module.fail_json(msg=to_native(
                    error), exception=traceback.format_exc())
        return True

    def apply(self):
        """Rename the node when 'name' is absent and 'from_name' exists."""
        # logging ems event
        results = netapp_utils.get_cserver(self.cluster)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_node", cserver)

        exists = self.get_node(self.parameters['name'])
        from_exists = self.get_node(self.parameters['from_name'])
        changed = False
        # if 'name' already exists, nothing to do (idempotent no-op)
        if not exists:
            if from_exists:
                if not self.module.check_mode:
                    self.rename_node()
                changed = True
            else:
                self.module.fail_json(msg='Error renaming node, from_name %s does not exist' % self.parameters['from_name'])

        self.module.exit_json(changed=changed)
+
+
def main():
    """
    Create the NetApp ONTAP node object and rename the node if needed.

    (Fixed: the previous docstring claimed this module starts/stops node
    services; it only performs a rename.)
    """
    obj = NetAppOntapNode()
    obj.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py
new file mode 100644
index 00000000..0b535992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+module: na_ontap_ntfs_dacl
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete or modify NTFS DACL (discretionary access control list)
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+- Create, modify, or destroy a NTFS DACL
+
+options:
+ state:
+ description:
+ - Whether the specified NTFS DACL should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the NTFS DACL.
+ required: true
+ type: str
+
+ security_descriptor:
+ description:
+ - Specifies the NTFS security descriptor.
+ required: true
+ type: str
+
+ access_type:
+ description:
+ - Specifies DACL ACE's access type. Possible values.
+ choices: ['allow', 'deny']
+ required: true
+ type: str
+
+ account:
+ description:
+ - Specifies DACL ACE's SID or domain account name of NTFS security descriptor.
+ required: true
+ type: str
+
+ rights:
+ description:
+ - Specifies DACL ACE's access rights. Mutually exclusive with advanced_access_rights.
+ choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write']
+ type: str
+
+ apply_to:
+ description:
+ - Specifies apply DACL entry.
+ choices: ['this_folder', 'sub_folders', 'files']
+ type: list
+ elements: str
+
+ advanced_access_rights:
+ description:
+ - Specifies DACL ACE's Advanced access rights. Mutually exclusive with rights.
+ choices: ['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea', 'execute_file', 'delete_child',
+ 'read_attr', 'write_attr', 'delete', 'read_perm', 'write_perm', 'write_owner', 'full_control']
+ type: list
+ elements: str
+
+"""
+
+EXAMPLES = """
+ - name: Add NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: present
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ access_type: allow
+ account: DOMAIN\\Account
+ rights: modify
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+
+ - name: Modify NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: present
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ access_type: full_control
+ account: DOMAIN\\Account
+ rights: modify
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Remove NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: absent
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ account: DOMAIN\\Account
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapNtfsDacl(object):
    """
    Creates, Modifies and Destroys an NTFS DACL entry on an NTFS
    security descriptor, via the file-directory-security-ntfs-dacl-*
    ZAPI calls.
    """

    def __init__(self):
        """
        Initialize the Ontap NTFS DACL class: argument spec, module
        object, helper, and the vserver-scoped ZAPI connection.
        """

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            security_descriptor=dict(required=True, type='str'),
            access_type=dict(required=True, choices=['allow', 'deny'], type='str'),
            account=dict(required=True, type='str'),
            rights=dict(required=False,
                        choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'],
                        type='str'),
            apply_to=dict(required=False, choices=['this_folder', 'sub_folders', 'files'], type='list', elements='str'),
            advanced_access_rights=dict(required=False,
                                        choices=['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea',
                                                 'execute_file', 'delete_child', 'read_attr', 'write_attr', 'delete',
                                                 'read_perm', 'write_perm', 'write_owner', 'full_control'],
                                        type='list', elements='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # simple rights and advanced rights cannot both be given
            mutually_exclusive=[('rights', 'advanced_access_rights')],
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg='The python NetApp-Lib module is required')
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_dacl(self):
        """
        Look up the DACL entry matching vserver, security descriptor,
        access type and account.

        :return: dict describing the DACL entry, or None if not found.
        """

        dacl_entry = None
        advanced_access_list = None

        dacl_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl-get-iter')
        dacl_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl')
        dacl_info.add_new_child('vserver', self.parameters['vserver'])
        dacl_info.add_new_child('ntfs-sd', self.parameters['security_descriptor'])
        dacl_info.add_new_child('access-type', self.parameters['access_type'])
        dacl_info.add_new_child('account', self.parameters['account'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(dacl_info)
        dacl_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(dacl_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching %s DACL for account %s for security descriptor %s: %s' % (
                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'],
                to_native(error)), exception=traceback.format_exc())

        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')

            if attributes_list is None:
                return None

            # only the first matching record is examined
            dacl = attributes_list.get_child_by_name('file-directory-security-ntfs-dacl')

            apply_to_list = []
            apply_to = dacl.get_child_by_name('apply-to')
            for apply_child in apply_to.get_children():
                inheritance_level = apply_child.get_content()

                apply_to_list.append(inheritance_level)

            if dacl.get_child_by_name('advanced-rights'):

                advanced_access_list = []
                advanced_access = dacl.get_child_by_name('advanced-rights')
                for right in advanced_access.get_children():
                    advanced_access_right = right.get_content()
                    # NOTE(review): each right is wrapped in a one-key dict
                    # while the module parameter is a flat list of strings;
                    # verify this shape matches what get_modified_attributes
                    # expects for idempotent comparisons
                    advanced_right = {
                        'advanced_access_rights': advanced_access_right
                    }
                    advanced_access_list.append(advanced_right)

            dacl_entry = {
                'access_type': dacl.get_child_content('access-type'),
                'account': dacl.get_child_content('account'),
                'apply_to': apply_to_list,
                'security_descriptor': dacl.get_child_content('ntfs-sd'),
                'readable_access_rights': dacl.get_child_content('readable-access-rights'),
                'vserver': dacl.get_child_content('vserver'),
            }

            # NOTE(review): the key used here is 'advanced_rights', not the
            # parameter name 'advanced_access_rights' -- confirm modification
            # detection for advanced rights works as intended
            if advanced_access_list is not None:
                dacl_entry['advanced_rights'] = advanced_access_list
            else:
                dacl_entry['rights'] = dacl.get_child_content('rights')
        return dacl_entry

    def add_dacl(self):
        """
        Adds a new NTFS DACL to an existing NTFS security descriptor.
        Requires one of 'rights' or 'advanced_access_rights'.
        """

        dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-add")
        dacl_obj.add_new_child("access-type", self.parameters['access_type'])
        dacl_obj.add_new_child("account", self.parameters['account'])
        dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])

        if 'rights' not in self.parameters.keys() and 'advanced_access_rights' not in self.parameters.keys():
            self.module.fail_json(msg='Either rights or advanced_access_rights must be specified.')

        if self.parameters.get('apply_to'):
            apply_to_obj = netapp_utils.zapi.NaElement("apply-to")

            for apply_entry in self.parameters['apply_to']:
                apply_to_obj.add_new_child('inheritance-level', apply_entry)
            dacl_obj.add_child_elem(apply_to_obj)

        if self.parameters.get('advanced_access_rights'):
            access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights")

            for right in self.parameters['advanced_access_rights']:
                access_rights_obj.add_new_child('advanced-access-rights', right)

            dacl_obj.add_child_elem(access_rights_obj)

        if self.parameters.get('rights'):
            dacl_obj.add_new_child("rights", self.parameters['rights'])

        try:
            self.server.invoke_successfully(dacl_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error adding %s DACL for account %s for security descriptor %s: %s' % (
                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)),
                exception=traceback.format_exc())

    def remove_dacl(self):
        """
        Deletes a NTFS DACL from an existing NTFS security descriptor.
        The entry is identified by access type, account and descriptor.
        """
        dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-remove")
        dacl_obj.add_new_child("access-type", self.parameters['access_type'])
        dacl_obj.add_new_child("account", self.parameters['account'])
        dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])

        try:
            self.server.invoke_successfully(dacl_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting %s DACL for account %s for security descriptor %s: %s' % (
                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)),
                exception=traceback.format_exc())

    def modify_dacl(self):
        """
        Modifies a NTFS DACL on an existing NTFS security descriptor.
        Builds the same optional sub-elements as add_dacl.
        """

        dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-modify")
        dacl_obj.add_new_child("access-type", self.parameters['access_type'])
        dacl_obj.add_new_child("account", self.parameters['account'])
        dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])

        if self.parameters.get('apply_to'):
            apply_to_obj = netapp_utils.zapi.NaElement("apply-to")

            for apply_entry in self.parameters['apply_to']:
                apply_to_obj.add_new_child('inheritance-level', apply_entry)
            dacl_obj.add_child_elem(apply_to_obj)

        if self.parameters.get('advanced_access_rights'):
            access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights")

            for right in self.parameters['advanced_access_rights']:
                access_rights_obj.add_new_child('advanced-access-rights', right)

            dacl_obj.add_child_elem(access_rights_obj)

        if self.parameters.get('rights'):
            dacl_obj.add_new_child("rights", self.parameters['rights'])

        try:
            self.server.invoke_successfully(dacl_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying %s DACL for account %s for security descriptor %s: %s' % (
                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'],
                to_native(error)), exception=traceback.format_exc())

    def autosupport_log(self):
        """Send an EMS log event via the cluster admin vserver."""
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_ntfs_dacl", cserver)

    def apply(self):
        """Decide on create/delete/modify and execute the chosen action."""
        self.autosupport_log()
        current, modify = self.get_dacl(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.add_dacl()
                elif cd_action == 'delete':
                    self.remove_dacl()
                elif modify:
                    self.modify_dacl()
        self.module.exit_json(changed=self.na_helper.changed)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Entry point: create, modify, or delete an NTFS DACL entry."""
    dacl_module = NetAppOntapNtfsDacl()
    dacl_module.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py
new file mode 100644
index 00000000..fa517333
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+
+module: na_ontap_ntfs_sd
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete or modify NTFS security descriptor
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Create, modify or destroy NTFS security descriptor
+
+options:
+ state:
+ description:
+ - Whether the specified NTFS security descriptor should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the NTFS security descriptor.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies the NTFS security descriptor name. Not modifiable.
+ required: true
+ type: str
+
+ owner:
+ description:
+ - Specifies the owner's SID or domain account of the NTFS security descriptor.
+ - Need to provide the full path of the owner.
+ type: str
+
+ group:
+ description:
+ - Specifies the group's SID or domain account of the NTFS security descriptor.
+ - Need to provide the full path of the group.
+ required: false
+ type: str
+
+ control_flags_raw:
+ description:
+ - Specifies the security descriptor control flags.
+ - 1... .... .... .... = Self Relative
+ - .0.. .... .... .... = RM Control Valid
+ - ..0. .... .... .... = SACL Protected
+ - ...0 .... .... .... = DACL Protected
+ - .... 0... .... .... = SACL Inherited
+ - .... .0.. .... .... = DACL Inherited
+ - .... ..0. .... .... = SACL Inherit Required
+ - .... ...0 .... .... = DACL Inherit Required
+ - .... .... ..0. .... = SACL Defaulted
+ - .... .... ...0 .... = SACL Present
+ - .... .... .... 0... = DACL Defaulted
+ - .... .... .... .1.. = DACL Present
+ - .... .... .... ..0. = Group Defaulted
+ - .... .... .... ...0 = Owner Defaulted
+ - At present only the following flags are honored. Others are ignored.
+ - ..0. .... .... .... = SACL Protected
+ - ...0 .... .... .... = DACL Protected
+ - .... .... ..0. .... = SACL Defaulted
+ - .... .... .... 0... = DACL Defaulted
+ - .... .... .... ..0. = Group Defaulted
+ - .... .... .... ...0 = Owner Defaulted
+ - Convert the 16 bit binary flags and convert to decimal for the input.
+ type: int
+
+"""
+
+EXAMPLES = """
+ - name: Create NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: present
+ vserver: SVM1
+ name: ansible_sd
+ owner: DOMAIN\\Account
+ group: DOMAIN\\Group
+ control_flags_raw: 0
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: present
+ vserver: SVM1
+ name: ansible_sd
+ owner: DOMAIN\\Account
+ group: DOMAIN\\Group
+ control_flags_raw: 0
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: absent
+ vserver: SVM1
+ name: ansible_sd
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapNtfsSd(object):
    """
    Creates, Modifies and Destroys a NTFS security descriptor via the
    file-directory-security-ntfs-* ZAPI calls.
    """

    def __init__(self):
        """
        Initialize the Ontap NTFS Security Descriptor class: argument
        spec, module object, helper, and the vserver-scoped ZAPI server.
        """

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            owner=dict(required=False, type='str'),
            group=dict(required=False, type='str'),
            control_flags_raw=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg='The python NetApp-Lib module is required')
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_ntfs_sd(self):
        """
        Look up the security descriptor by vserver and name.

        :return: dict with vserver, name, owner, group and
            control_flags_raw (int) if found, None otherwise.
        """

        ntfs_sd_entry, result = None, None

        ntfs_sd_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-get-iter')
        ntfs_sd_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs')
        ntfs_sd_info.add_new_child('vserver', self.parameters['vserver'])
        ntfs_sd_info.add_new_child('ntfs-sd', self.parameters['name'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(ntfs_sd_info)
        ntfs_sd_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(ntfs_sd_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching NTFS security descriptor %s : %s'
                                      % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')
            # only the first matching record is examined
            ntfs_sd = attributes_list.get_child_by_name('file-directory-security-ntfs')
            ntfs_sd_entry = {
                'vserver': ntfs_sd.get_child_content('vserver'),
                'name': ntfs_sd.get_child_content('ntfs-sd'),
                'owner': ntfs_sd.get_child_content('owner'),
                'group': ntfs_sd.get_child_content('group'),
                'control_flags_raw': ntfs_sd.get_child_content('control-flags-raw'),
            }
            # ZAPI returns the flags as a string; convert to int so it
            # compares equal to the integer module parameter
            if ntfs_sd_entry.get('control_flags_raw'):
                ntfs_sd_entry['control_flags_raw'] = int(ntfs_sd_entry['control_flags_raw'])
            return ntfs_sd_entry
        return None

    def add_ntfs_sd(self):
        """
        Adds a new NTFS security descriptor; owner, group and
        control flags are optional.
        """

        ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-create")
        ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])

        # 'is not None' so an explicit 0 is still sent
        if self.parameters.get('control_flags_raw') is not None:
            ntfs_sd_obj.add_new_child("control-flags-raw", str(self.parameters['control_flags_raw']))

        if self.parameters.get('owner'):
            ntfs_sd_obj.add_new_child("owner", self.parameters['owner'])

        if self.parameters.get('group'):
            ntfs_sd_obj.add_new_child("group", self.parameters['group'])

        try:
            self.server.invoke_successfully(ntfs_sd_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error creating NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
                exception=traceback.format_exc())

    def remove_ntfs_sd(self):
        """
        Deletes a NTFS security descriptor by name.
        """
        ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-delete")
        ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
        try:
            self.server.invoke_successfully(ntfs_sd_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_ntfs_sd(self):
        """
        Modifies a NTFS security descriptor; only the optional fields
        supplied by the caller are sent.
        """

        ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-modify")
        ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])

        if self.parameters.get('control_flags_raw') is not None:
            ntfs_sd_obj.add_new_child('control-flags-raw', str(self.parameters['control_flags_raw']))

        if self.parameters.get('owner'):
            ntfs_sd_obj.add_new_child('owner', self.parameters['owner'])

        if self.parameters.get('group'):
            ntfs_sd_obj.add_new_child('group', self.parameters['group'])

        try:
            self.server.invoke_successfully(ntfs_sd_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error modifying NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
                exception=traceback.format_exc())

    def apply(self):
        """Decide on create/delete/modify and execute the chosen action."""
        # NOTE(review): EMS logging uses the vserver-scoped connection here,
        # unlike sibling modules which log via the cluster admin vserver --
        # confirm this is intentional
        netapp_utils.ems_log_event("na_ontap_ntfs_sd", self.server)
        current, modify = self.get_ntfs_sd(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.add_ntfs_sd()
                elif cd_action == 'delete':
                    self.remove_ntfs_sd()
                elif modify:
                    self.modify_ntfs_sd()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """
    Creates, deletes and modifies NTFS security descriptor
    """
    obj = NetAppOntapNtfsSd()
    obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py
new file mode 100644
index 00000000..e7f88518
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+module: na_ontap_ntp
+short_description: NetApp ONTAP NTP server
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create or delete or modify NTP server in ONTAP
+options:
+ state:
+ description:
+ - Whether the specified NTP server should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ server_name:
+ description:
+ - The name of the NTP server to manage.
+ required: True
+ type: str
+ version:
+ description:
+ - give version for NTP server
+ choices: ['auto', '3', '4']
+ default: 'auto'
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Create NTP server
+ na_ontap_ntp:
+ state: present
+ version: auto
+ server_name: "{{ server_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete NTP server
+ na_ontap_ntp:
+ state: absent
+ server_name: "{{ server_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapNTPServer(object):
    """Create, delete, or modify an NTP server entry on ONTAP."""

    def __init__(self):
        """Set up argument spec, state variables and the ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            server_name=dict(required=True, type='str'),
            version=dict(required=False, type='str', default='auto',
                         choices=['auto', '3', '4']),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        parameters = self.module.params

        # set up state variables
        self.state = parameters['state']
        self.server_name = parameters['server_name']
        self.version = parameters['version']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_ntp_server(self):
        """
        Return details about the ntp server.

        :return: dict with 'server-name' and 'version' if exactly one
            matching server exists, None otherwise.
        :rtype: dict or None
        """
        ntp_iter = netapp_utils.zapi.NaElement('ntp-server-get-iter')
        ntp_info = netapp_utils.zapi.NaElement('ntp-server-info')
        ntp_info.add_new_child('server-name', self.server_name)

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(ntp_info)

        ntp_iter.add_child_elem(query)
        # wrap the call like the other methods so a ZAPI failure produces
        # a clean module error instead of an unhandled traceback
        try:
            result = self.server.invoke_successfully(ntp_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching ntp server %s: %s'
                                      % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())
        return_value = None

        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            # navigate to the single matching record once, then read both fields
            server_info = result.get_child_by_name('attributes-list').\
                get_child_by_name('ntp-server-info')
            return_value = {
                'server-name': server_info.get_child_content('server-name'),
                'version': server_info.get_child_content('version')
            }

        return return_value

    def create_ntp_server(self):
        """
        Create the ntp server with the requested version.
        """
        ntp_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'ntp-server-create', **{'server-name': self.server_name,
                                    'version': self.version
                                    })

        try:
            self.server.invoke_successfully(ntp_server_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating ntp server %s: %s'
                                  % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())

    def delete_ntp_server(self):
        """
        Delete the ntp server by name.
        """
        ntp_server_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'ntp-server-delete', **{'server-name': self.server_name})

        try:
            self.server.invoke_successfully(ntp_server_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting ntp server %s: %s'
                                  % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_version(self):
        """
        Modify the ntp server's version.
        """
        ntp_modify_version = netapp_utils.zapi.NaElement.create_node_with_children(
            'ntp-server-modify',
            **{'server-name': self.server_name, 'version': self.version})
        try:
            self.server.invoke_successfully(ntp_modify_version,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying version for ntp server %s: %s'
                                  % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Apply action to ntp-server: create, delete, or modify version."""

        changed = False
        ntp_modify = False
        # log an EMS event against the cluster admin vserver
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(
            module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_ntp", cserver)
        ntp_server_details = self.get_ntp_server()
        if ntp_server_details is not None:
            if self.state == 'absent':  # delete
                changed = True
            elif self.state == 'present' and self.version:
                # modify version
                if self.version != ntp_server_details['version']:
                    ntp_modify = True
                    changed = True
        else:
            if self.state == 'present':  # create
                changed = True

        if changed:
            if self.module.check_mode:
                pass
            else:
                if self.state == 'present':
                    if ntp_server_details is None:
                        self.create_ntp_server()
                    elif ntp_modify:
                        self.modify_version()
                elif self.state == 'absent':
                    self.delete_ntp_server()

        self.module.exit_json(changed=changed)
+
+
def main():
    """Instantiate the NTP server module and apply the requested state."""
    module_obj = NetAppOntapNTPServer()
    module_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py
new file mode 100644
index 00000000..055889e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVMe Service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified NVMe should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ status_admin:
+ description:
+ - Whether the status of NVMe should be up or down
+ type: bool
+short_description: "NetApp ONTAP Manage NVMe Service"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVMe
+ na_ontap_nvme:
+ state: present
+ status_admin: False
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NVMe
+ na_ontap_nvme:
+ state: present
+ status_admin: True
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete NVMe
+ na_ontap_nvme:
+ state: absent
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPNVMe(object):
    """
    Class with NVMe service methods: create, delete, and enable/disable
    the vserver-scoped NVMe target service.
    """

    def __init__(self):
        """Set up argument spec, parameters and the ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            status_admin=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_nvme(self):
        """
        Get current nvme details
        :return: dict if nvme exists, None otherwise
        """
        nvme_get = netapp_utils.zapi.NaElement('nvme-get-iter')
        query = {
            'query': {
                'nvme-target-service-info': {
                    'vserver': self.parameters['vserver']
                }
            }
        }
        nvme_get.translate_struct(query)
        try:
            result = self.server.invoke_successfully(nvme_get, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching nvme info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')
            nvme_info = attributes_list.get_child_by_name('nvme-target-service-info')
            # 'is-available' is returned as a string by ZAPI; apply()
            # converts the boolean parameter before comparing against it
            return_value = {'status_admin': nvme_info.get_child_content('is-available')}
            return return_value
        return None

    def create_nvme(self):
        """
        Create NVMe service, optionally setting the initial admin status.
        """
        nvme_create = netapp_utils.zapi.NaElement('nvme-create')
        if self.parameters.get('status_admin') is not None:
            options = {'is-available': self.parameters['status_admin']}
            nvme_create.translate_struct(options)
        try:
            self.server.invoke_successfully(nvme_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating nvme for vserver %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_nvme(self):
        """
        Delete NVMe service for the vserver.
        """
        nvme_delete = netapp_utils.zapi.NaElement('nvme-delete')
        try:
            self.server.invoke_successfully(nvme_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting nvme for vserver %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_nvme(self, status=None):
        """
        Modify NVMe service admin status.

        :param status: explicit 'true'/'false' override; defaults to the
            module's status_admin parameter.
        """
        if status is None:
            status = self.parameters['status_admin']
        options = {'is-available': status}
        nvme_modify = netapp_utils.zapi.NaElement('nvme-modify')
        nvme_modify.translate_struct(options)
        try:
            self.server.invoke_successfully(nvme_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying nvme for vserver %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """
        Apply action to NVMe service
        """
        netapp_utils.ems_log_event("na_ontap_nvme", self.server)
        # initialize modify so the 'elif modify' branch can never hit an
        # unbound local; matches the pattern used by sibling modules
        current, modify = self.get_nvme(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.parameters.get('status_admin') is not None:
            # convert the boolean to the 'true'/'false' string ZAPI reports
            self.parameters['status_admin'] = self.na_helper.get_value_for_bool(False, self.parameters['status_admin'])
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.create_nvme()
                elif cd_action == 'delete':
                    # NVMe status_admin needs to be down before deleting it
                    self.modify_nvme('false')
                    self.delete_nvme()
                elif modify:
                    self.modify_nvme()

        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Instantiate the NVMe service module and run the requested action."""
    nvme_obj = NetAppONTAPNVMe()
    nvme_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py
new file mode 100644
index 00000000..e58ea581
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVME namespace
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme_namespace
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified namespace should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ ostype:
+ description:
+ - Specifies the ostype for initiators
+ choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v']
+ type: str
+ size:
+ description:
+ - Size in bytes.
+ Range is [0..2^63-1].
+ type: int
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'b'
+ path:
+ description:
+ - Namespace path.
+ required: true
+ type: str
+ block_size:
+ description:
+ - Size in bytes of a logical block. Possible values are 512 (Data ONTAP 9.6 and later), 4096. The default value is 4096.
+ choices: [512, 4096]
+ type: int
+ version_added: '20.5.0'
+short_description: "NetApp ONTAP Manage NVME Namespace"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVME Namespace
+ na_ontap_nvme_namespace:
+ state: present
+ ostype: linux
+ path: /vol/ansible/test
+ size: 20
+ size_unit: mb
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Create NVME Namespace (Idempotency)
+ na_ontap_nvme_namespace:
+ state: present
+ ostype: linux
+ path: /vol/ansible/test
+ size: 20
+ size_unit: mb
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNVMENamespace(object):
+ """
+ Class with NVME namespace methods
+ """
+
+    def __init__(self):
+        """
+        Build the argument spec, parse module parameters, normalize the
+        requested size to bytes, and set up the vserver-scoped ZAPI server.
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']),
+            path=dict(required=True, type='str'),
+            size=dict(required=False, type='int'),
+            size_unit=dict(default='b', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
+            block_size=dict(required=False, choices=[512, 4096], type='int')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            # ostype and size are only meaningful (and required) on create
+            required_if=[('state', 'present', ['ostype', 'size'])],
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # Normalize size to bytes using the requested unit multiplier.
+        if self.parameters.get('size'):
+            self.parameters['size'] = self.parameters['size'] * \
+                netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_namespace(self):
+        """
+        Get current namespace details
+        :return: the ZAPI result NaElement if the namespace exists
+            (num-records >= 1), None otherwise
+        """
+        namespace_get = netapp_utils.zapi.NaElement('nvme-namespace-get-iter')
+        # Query by both path and vserver to uniquely identify the namespace.
+        query = {
+            'query': {
+                'nvme-namespace-info': {
+                    'path': self.parameters['path'],
+                    'vserver': self.parameters['vserver']
+                }
+            }
+        }
+        namespace_get.translate_struct(query)
+        try:
+            result = self.server.invoke_successfully(namespace_get, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching namespace info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            return result
+        return None
+
+    def create_namespace(self):
+        """
+        Create a NVME Namespace at the configured path with the given
+        ostype and size (bytes); block-size is passed only when provided.
+        Fails the module on any ZAPI error.
+        """
+        options = {'path': self.parameters['path'],
+                   'ostype': self.parameters['ostype'],
+                   'size': self.parameters['size']
+                   }
+        if self.parameters.get('block_size'):
+            options['block-size'] = self.parameters['block_size']
+        namespace_create = netapp_utils.zapi.NaElement('nvme-namespace-create')
+        namespace_create.translate_struct(options)
+        try:
+            self.server.invoke_successfully(namespace_create, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating namespace for path %s: %s'
+                                      % (self.parameters.get('path'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_namespace(self):
+        """
+        Delete the NVME Namespace identified by the 'path' parameter.
+        Fails the module on any ZAPI error.
+        """
+        options = {'path': self.parameters['path']
+                   }
+        namespace_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-namespace-delete', **options)
+        try:
+            self.server.invoke_successfully(namespace_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting namespace for path %s: %s'
+                                      % (self.parameters.get('path'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Apply the requested state (create/delete) to the NVMe Namespace
+        and exit the module with the resulting 'changed' flag.
+        """
+        netapp_utils.ems_log_event("na_ontap_nvme_namespace", self.server)
+        current = self.get_namespace()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed:
+            # In check mode report the pending change without executing it.
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_namespace()
+                elif cd_action == 'delete':
+                    self.delete_namespace()
+
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """Instantiate the namespace module object and apply the requested state."""
+    community_obj = NetAppONTAPNVMENamespace()
+    community_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py
new file mode 100644
index 00000000..5229c6e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVME subsystem
+ - Associate(modify) host/map to NVME subsystem
+ - NVMe service should be existing in the data vserver with NVMe protocol as a pre-requisite
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme_subsystem
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified subsystem should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ subsystem:
+ description:
+ - Specifies the subsystem
+ required: true
+ type: str
+ ostype:
+ description:
+ - Specifies the ostype for initiators
+ choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v']
+ type: str
+ skip_host_check:
+ description:
+ - Skip host check
+ - Required to delete an NVMe Subsystem with attached NVMe namespaces
+ default: false
+ type: bool
+ skip_mapped_check:
+ description:
+ - Skip mapped namespace check
+ - Required to delete an NVMe Subsystem with attached NVMe namespaces
+ default: false
+ type: bool
+ hosts:
+ description:
+ - List of host NQNs (NVMe Qualification Name) associated to the controller.
+ type: list
+ elements: str
+ paths:
+ description:
+ - List of Namespace paths to be associated with the subsystem.
+ type: list
+ elements: str
+short_description: "NetApp ONTAP Manage NVME Subsystem"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVME Subsystem
+ na_ontap_nvme_subsystem:
+ state: present
+ subsystem: test_sub
+ vserver: test_dest
+ ostype: linux
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete NVME Subsystem
+ na_ontap_nvme_subsystem:
+ state: absent
+ subsystem: test_sub
+ vserver: test_dest
+ skip_host_check: True
+ skip_mapped_check: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Associate NVME Subsystem host/map
+ na_ontap_nvme_subsystem:
+ state: present
+ subsystem: "{{ subsystem }}"
+ ostype: linux
+ hosts: nqn.1992-08.com.netapp:sn.3017cfc1e2ba11e89c55005056b36338:subsystem.ansible
+ paths: /vol/ansible/test,/vol/ansible/test1
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NVME subsystem map
+ na_ontap_nvme_subsystem:
+ state: present
+ subsystem: test_sub
+ vserver: test_dest
+ skip_host_check: True
+ skip_mapped_check: True
+ paths: /vol/ansible/test
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNVMESubsystem(object):
+ """
+ Class with NVME subsytem methods
+ """
+
+    def __init__(self):
+        """
+        Build the argument spec, parse module parameters, and set up the
+        vserver-scoped ZAPI server connection.
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            subsystem=dict(required=True, type='str'),
+            ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']),
+            skip_host_check=dict(required=False, type='bool', default=False),
+            skip_mapped_check=dict(required=False, type='bool', default=False),
+            hosts=dict(required=False, type='list', elements='str'),
+            paths=dict(required=False, type='list', elements='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_subsystem(self):
+        """
+        Get current subsystem details
+        :return: True if the subsystem exists (num-records >= 1),
+            None otherwise
+        """
+        subsystem_get = netapp_utils.zapi.NaElement('nvme-subsystem-get-iter')
+        query = {
+            'query': {
+                'nvme-subsystem-info': {
+                    'subsystem': self.parameters.get('subsystem'),
+                    'vserver': self.parameters.get('vserver')
+                }
+            }
+        }
+        subsystem_get.translate_struct(query)
+        try:
+            result = self.server.invoke_successfully(subsystem_get, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching subsystem info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            return True
+        return None
+
+ def create_subsystem(self):
+ """
+ Create a NVME Subsystem
+ """
+ if self.parameters.get('ostype') is None:
+ self.module.fail_json(msg="Error: Missing required parameter 'os_type' for creating subsystem")
+ options = {'subsystem': self.parameters['subsystem'],
+ 'ostype': self.parameters['ostype']
+ }
+ subsystem_create = netapp_utils.zapi.NaElement('nvme-subsystem-create')
+ subsystem_create.translate_struct(options)
+ try:
+ self.server.invoke_successfully(subsystem_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating subsystem for %s: %s'
+ % (self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+    def delete_subsystem(self):
+        """
+        Delete a NVME subsystem, honoring the skip-host-check and
+        skip-mapped-check flags (required to delete a subsystem that still
+        has attached namespaces). Fails the module on any ZAPI error.
+        """
+        options = {'subsystem': self.parameters['subsystem'],
+                   'skip-host-check': 'true' if self.parameters.get('skip_host_check') else 'false',
+                   'skip-mapped-check': 'true' if self.parameters.get('skip_mapped_check') else 'false',
+                   }
+        subsystem_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-subsystem-delete', **options)
+        try:
+            self.server.invoke_successfully(subsystem_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting subsystem for %s: %s'
+                                      % (self.parameters.get('subsystem'), to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_subsystem_host_map(self, type):
+        """
+        Get current subsystem host/map details.
+
+        :param type: 'hosts' or 'paths' — selects which ZAPI iterator and
+            result field to use. (NOTE: 'type' shadows the builtin here.)
+        :return: dict {type: [items]} if any records exist, None otherwise
+        """
+        if type == 'hosts':
+            zapi_get, zapi_info, zapi_type = 'nvme-subsystem-host-get-iter', 'nvme-target-subsystem-host-info',\
+                                             'host-nqn'
+        elif type == 'paths':
+            zapi_get, zapi_info, zapi_type = 'nvme-subsystem-map-get-iter', 'nvme-target-subsystem-map-info', 'path'
+        subsystem_get = netapp_utils.zapi.NaElement(zapi_get)
+        query = {
+            'query': {
+                zapi_info: {
+                    'subsystem': self.parameters.get('subsystem'),
+                    'vserver': self.parameters.get('vserver')
+                }
+            }
+        }
+        subsystem_get.translate_struct(query)
+        try:
+            result = self.server.invoke_successfully(subsystem_get, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching subsystem info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            attrs_list = result.get_child_by_name('attributes-list')
+            return_list = []
+            # Collect the host-nqn / path value from each record.
+            for item in attrs_list.get_children():
+                return_list.append(item[zapi_type])
+            return {type: return_list}
+        return None
+
+    def add_subsystem_host_map(self, data, type):
+        """
+        Add a NVME Subsystem host/map
+        :param: data: list of hosts/paths to be added
+        :param: type: hosts/paths
+
+        Issues one ZAPI call per item; fails the module on the first error.
+        """
+        if type == 'hosts':
+            zapi_add, zapi_type = 'nvme-subsystem-host-add', 'host-nqn'
+        elif type == 'paths':
+            zapi_add, zapi_type = 'nvme-subsystem-map-add', 'path'
+
+        for item in data:
+            options = {'subsystem': self.parameters['subsystem'],
+                       zapi_type: item
+                       }
+            subsystem_add = netapp_utils.zapi.NaElement.create_node_with_children(zapi_add, **options)
+            try:
+                self.server.invoke_successfully(subsystem_add, enable_tunneling=True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error adding %s for subsystem %s: %s'
+                                          % (item, self.parameters.get('subsystem'), to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def remove_subsystem_host_map(self, data, type):
+        """
+        Remove a NVME Subsystem host/map
+        :param: data: list of hosts/paths to be removed
+        :param: type: hosts/paths
+
+        Issues one ZAPI call per item; fails the module on the first error.
+        """
+        if type == 'hosts':
+            zapi_remove, zapi_type = 'nvme-subsystem-host-remove', 'host-nqn'
+        elif type == 'paths':
+            zapi_remove, zapi_type = 'nvme-subsystem-map-remove', 'path'
+
+        for item in data:
+            options = {'subsystem': self.parameters['subsystem'],
+                       zapi_type: item
+                       }
+            subsystem_remove = netapp_utils.zapi.NaElement.create_node_with_children(zapi_remove, **options)
+            try:
+                self.server.invoke_successfully(subsystem_remove, enable_tunneling=True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error removing %s for subsystem %s: %s'
+                                          % (item, self.parameters.get('subsystem'), to_native(error)),
+                                      exception=traceback.format_exc())
+
+ def associate_host_map(self, types):
+ """
+ Check if there are hosts or paths to be associated with the subsystem
+ """
+ action_add_dict = {}
+ action_remove_dict = {}
+ for type in types:
+ if self.parameters.get(type):
+ current = self.get_subsystem_host_map(type)
+ if current:
+ add_items = self.na_helper.\
+ get_modified_attributes(current, self.parameters, get_list_diff=True).get(type)
+ remove_items = [item for item in current[type] if item not in self.parameters.get(type)]
+ else:
+ add_items = self.parameters[type]
+ remove_items = {}
+ if add_items:
+ action_add_dict[type] = add_items
+ self.na_helper.changed = True
+ if remove_items:
+ action_remove_dict[type] = remove_items
+ self.na_helper.changed = True
+ return action_add_dict, action_remove_dict
+
+    def modify_host_map(self, add_host_map, remove_host_map):
+        """
+        Apply the computed host/path associations: add then remove,
+        one (type, items) pair at a time.
+        """
+        for type, data in add_host_map.items():
+            self.add_subsystem_host_map(data, type)
+        for type, data in remove_host_map.items():
+            self.remove_subsystem_host_map(data, type)
+
+    def apply(self):
+        """
+        Apply the requested state to the NVMe subsystem: create/delete the
+        subsystem and reconcile its host and namespace-path associations.
+        Exits the module with the resulting 'changed' flag.
+        """
+        netapp_utils.ems_log_event("na_ontap_nvme_subsystem", self.server)
+        types = ['hosts', 'paths']
+        current = self.get_subsystem()
+        add_host_map, remove_host_map = dict(), dict()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        # Host/path reconciliation only applies when keeping the subsystem.
+        if cd_action != 'delete' and self.parameters['state'] == 'present':
+            add_host_map, remove_host_map = self.associate_host_map(types)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_subsystem()
+                    self.modify_host_map(add_host_map, remove_host_map)
+                elif cd_action == 'delete':
+                    self.delete_subsystem()
+                elif cd_action is None:
+                    # Subsystem already exists; only associations changed.
+                    self.modify_host_map(add_host_map, remove_host_map)
+
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """Instantiate the subsystem module object and apply the requested state."""
+    community_obj = NetAppONTAPNVMESubsystem()
+    community_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py
new file mode 100644
index 00000000..eef83911
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_object_store
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_object_store
+short_description: NetApp ONTAP manage object store config.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete object store config on ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified object store config should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the object store config to manage.
+ type: str
+
+ provider_type:
+ required: false
+ description:
+ - The name of the object store config provider.
+ type: str
+
+ server:
+ required: false
+ description:
+ - Fully qualified domain name of the object store config.
+ type: str
+
+ container:
+ required: false
+ description:
+ - Data bucket/container name used in S3 requests.
+ type: str
+
+ access_key:
+ required: false
+ description:
+ - Access key ID for AWS_S3 and SGWS provider types.
+ type: str
+
+ secret_password:
+ required: false
+ description:
+ - Secret access key for AWS_S3 and SGWS provider types.
+ type: str
+'''
+
+EXAMPLES = """
+- name: object store Create
+ na_ontap_object_store:
+ state: present
+ name: ansible
+ provider_type: SGWS
+ server: abc
+ container: abc
+ access_key: s3.amazonaws.com
+ secret_password: abc
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: object store Create
+ na_ontap_object_store:
+ state: absent
+ name: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapObjectStoreConfig(object):
+ ''' object initialize and class methods '''
+
+    def __init__(self):
+        """
+        Build the argument spec, parse parameters, and pick the transport:
+        REST when the cluster supports it (ONTAP 9.6+), otherwise ZAPI.
+        """
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            provider_type=dict(required=False, type='str'),
+            server=dict(required=False, type='str'),
+            container=dict(required=False, type='str'),
+            access_key=dict(required=False, type='str'),
+            secret_password=dict(required=False, type='str', no_log=True)
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # API should be used for ONTAP 9.6 or higher, Zapi for lower version
+        self.rest_api = OntapRestAPI(self.module)
+        if self.rest_api.is_rest():
+            self.use_rest = True
+        else:
+            # ZAPI fallback requires the NetApp-Lib python package.
+            if HAS_NETAPP_LIB is False:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_aggr_object_store(self):
+        """
+        Fetch details if object store config exists.
+        :return:
+            REST: dict record (with 'uuid' and 'name') if found, None otherwise
+            ZAPI: result NaElement if found, None if the API reports error 15661
+        """
+        if self.use_rest:
+            data = {'fields': 'uuid,name',
+                    'name': self.parameters['name']}
+            api = "cloud/targets"
+            message, error = self.rest_api.get(api, data)
+            if error:
+                self.module.fail_json(msg=error)
+            if len(message['records']) != 0:
+                return message['records'][0]
+            return None
+        else:
+            aggr_object_store_get_iter = netapp_utils.zapi.NaElement.create_node_with_children(
+                'aggr-object-store-config-get', **{'object-store-name': self.parameters['name']})
+            result = None
+            try:
+                result = self.server.invoke_successfully(aggr_object_store_get_iter, enable_tunneling=False)
+            except netapp_utils.zapi.NaApiError as error:
+                # Error 15661 denotes an object store not being found.
+                if to_native(error.code) == "15661":
+                    pass
+                else:
+                    self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+            return result
+
+ def create_aggr_object_store(self):
+ """
+ Create aggregate object store config
+ :return: None
+ """
+ required_keys = set(['provider_type', 'server', 'container', 'access_key'])
+ if not required_keys.issubset(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error provisioning object store %s: one of the following parameters are missing '
+ '%s' % (self.parameters['name'], ', '.join(required_keys)))
+ if self.use_rest:
+ data = {'name': self.parameters['name'],
+ 'provider_type': self.parameters['provider_type'],
+ 'server': self.parameters['server'],
+ 'container': self.parameters['container'],
+ 'access_key': self.parameters['access_key'],
+ 'owner': 'fabricpool'}
+ if self.parameters.get('secret_password'):
+ data['secret_password'] = self.parameters['secret_password']
+ api = "cloud/targets"
+ dummy, error = self.rest_api.post(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ options = {'object-store-name': self.parameters['name'],
+ 'provider-type': self.parameters['provider_type'],
+ 'server': self.parameters['server'],
+ 's3-name': self.parameters['container'],
+ 'access-key': self.parameters['access_key']}
+ if self.parameters.get('secret_password'):
+ options['secret-password'] = self.parameters['secret_password']
+ object_store_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-object-store-config-create', **options)
+
+ try:
+ self.server.invoke_successfully(object_store_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error provisioning object store config %s: %s"
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+    def delete_aggr_object_store(self, uuid=None):
+        """
+        Delete aggregate object store config
+        :param uuid: REST uuid of the target (required for the REST path;
+            ignored by ZAPI, which deletes by name)
+        :return: None
+        """
+        if self.use_rest:
+            api = "cloud/targets/%s" % uuid
+            dummy, error = self.rest_api.delete(api)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            object_store_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+                'aggr-object-store-config-delete', **{'object-store-name': self.parameters['name']})
+
+            try:
+                self.server.invoke_successfully(object_store_destroy,
+                                                enable_tunneling=False)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg="Error removing object store config %s: %s" %
+                                      (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+    def asup_log_for_cserver(self, event_name):
+        """
+        Fetch admin vserver for the given cluster
+        Create and Autosupport log event with the given module name
+        :param event_name: Name of the event log
+        :return: None
+        """
+        results = netapp_utils.get_cserver(self.server)
+        # EMS events must be logged against the cluster admin vserver.
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event(event_name, cserver)
+
+    def apply(self):
+        """
+        Apply action to the object store config
+        :return: None
+        """
+        uuid = None
+        # EMS logging goes through ZAPI only; skip it on the REST path.
+        if not self.use_rest:
+            self.asup_log_for_cserver("na_ontap_object_store_config")
+        current = self.get_aggr_object_store()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_aggr_object_store()
+                elif cd_action == 'delete':
+                    # REST deletes by uuid (from the fetched record); ZAPI by name.
+                    if self.use_rest:
+                        uuid = current['uuid']
+                    self.delete_aggr_object_store(uuid)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """
+    Create Object Store Config class instance and invoke apply
+    :return: None
+    """
+    obj_store = NetAppOntapObjectStoreConfig()
+    obj_store.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py
new file mode 100644
index 00000000..f676f7c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+''' This is an Ansible module for ONTAP to manage ports for various resources.
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_ports
+short_description: NetApp ONTAP add/remove ports
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add or remove ports for broadcast domain and portset.
+
+options:
+ state:
+ description:
+ - Whether the specified port should be added or removed.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Name of the SVM.
+ - Specify this option when operating on portset.
+ type: str
+
+ names:
+ description:
+ - List of ports.
+ type: list
+ elements: str
+ required: true
+
+ resource_name:
+ description:
+ - name of the portset or broadcast domain.
+ type: str
+ required: true
+
+ resource_type:
+ description:
+ - type of the resource to add a port to or remove a port from.
+ choices: ['broadcast_domain', 'portset']
+ required: true
+ type: str
+
+ ipspace:
+ description:
+ - Specify the required ipspace for the broadcast domain.
+ - A domain ipspace can not be modified after the domain has been created.
+ type: str
+
+ portset_type:
+ description:
+ - Protocols accepted for portset.
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+
+'''
+
+EXAMPLES = '''
+
+ - name: broadcast domain remove port
+ tags:
+ - remove
+ na_ontap_ports:
+ state: absent
+ names: test-vsim1:e0d-1,test-vsim1:e0d-2
+ resource_type: broadcast_domain
+ resource_name: ansible_domain
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: broadcast domain add port
+ tags:
+ - add
+ na_ontap_ports:
+ state: present
+ names: test-vsim1:e0d-1,test-vsim1:e0d-2
+ resource_type: broadcast_domain
+ resource_name: ansible_domain
+ ipspace: Default
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: portset remove port
+ tags:
+ - remove
+ na_ontap_ports:
+ state: absent
+ names: lif_2
+ resource_type: portset
+ resource_name: portset_1
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: portset add port
+ tags:
+ - add
+ na_ontap_ports:
+ state: present
+ names: lif_2
+ resource_type: portset
+ resource_name: portset_1
+ portset_type: iscsi
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapPorts(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=False, type='str'),
+ names=dict(required=True, type='list', elements='str'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str', choices=['broadcast_domain', 'portset']),
+ ipspace=dict(required=False, type='str'),
+ portset_type=dict(required=False, type='str', choices=['fcp', 'iscsi', 'mixed']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('resource_type', 'portset', ['vserver']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ if self.parameters['resource_type'] == 'broadcast_domain':
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ elif self.parameters['resource_type'] == 'portset':
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def add_broadcast_domain_ports(self, ports):
+ """
+ Add broadcast domain ports
+ :param: ports to be added.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port for broadcast domain %s: %s' %
+ (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_broadcast_domain_ports(self, ports):
+ """
+ Deletes broadcast domain ports
+ :param: ports to be removed.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing port for broadcast domain %s: %s' %
+ (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_broadcast_domain_ports(self):
+ """
+ Return details about the broadcast domain ports.
+ :return: Details about the broadcast domain ports. [] if not found.
+ :rtype: list
+ """
+ domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+ broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+ broadcast_domain_info.add_new_child('broadcast-domain', self.parameters['resource_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(broadcast_domain_info)
+ domain_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(domain_get_iter, True)
+ ports = []
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+ domain_ports = domain_info.get_child_by_name('ports')
+ if domain_ports is not None:
+ ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+ return ports
+
+ def remove_portset_ports(self, port):
+ """
+        Remove the given port from the portset
+ :return: None
+ """
+ options = {'portset-name': self.parameters['resource_name'],
+ 'portset-port-name': port.strip()}
+
+ portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-remove', **options)
+
+ try:
+ self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing port in portset %s: %s' %
+ (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc())
+
+ def add_portset_ports(self, port):
+ """
+        Add the given port to the portset
+ :return: None
+ """
+ options = {'portset-name': self.parameters['resource_name'],
+ 'portset-port-name': port.strip()}
+
+ portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-add', **options)
+
+ try:
+ self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port in portset %s: %s' %
+ (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc())
+
+ def portset_get_iter(self):
+ """
+ Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters
+ :return: NaElement object for portset-get-iter with query
+ """
+ portset_get = netapp_utils.zapi.NaElement('portset-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ portset_info = netapp_utils.zapi.NaElement('portset-info')
+ portset_info.add_new_child('vserver', self.parameters['vserver'])
+ portset_info.add_new_child('portset-name', self.parameters['resource_name'])
+ if self.parameters.get('portset_type'):
+ portset_info.add_new_child('portset-type', self.parameters['portset_type'])
+ query.add_child_elem(portset_info)
+ portset_get.add_child_elem(query)
+ return portset_get
+
+ def portset_get(self):
+ """
+ Get current portset info
+ :return: List of current ports if query successful, else return []
+ """
+ portset_get_iter = self.portset_get_iter()
+ result, ports = None, []
+ try:
+ result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching portset %s: %s'
+ % (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+ # return portset details
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info')
+ if int(portset_get_info.get_child_content('portset-port-total')) > 0:
+ port_info = portset_get_info.get_child_by_name('portset-port-info')
+ ports = [port.get_content() for port in port_info.get_children()]
+ return ports
+
+ def modify_broadcast_domain_ports(self):
+ """
+        Compare current and desired ports; call the add or remove ports methods as needed.
+ :return: None.
+ """
+ current_ports = self.get_broadcast_domain_ports()
+ cd_ports = self.parameters['names']
+ if self.parameters['state'] == 'present':
+ ports_to_add = [port for port in cd_ports if port not in current_ports]
+ if len(ports_to_add) > 0:
+ if not self.module.check_mode:
+ self.add_broadcast_domain_ports(ports_to_add)
+ self.na_helper.changed = True
+
+ if self.parameters['state'] == 'absent':
+ ports_to_remove = [port for port in cd_ports if port in current_ports]
+ if len(ports_to_remove) > 0:
+ if not self.module.check_mode:
+ self.remove_broadcast_domain_ports(ports_to_remove)
+ self.na_helper.changed = True
+
+ def modify_portset_ports(self):
+ current_ports = self.portset_get()
+ cd_ports = self.parameters['names']
+ if self.parameters['state'] == 'present':
+ ports_to_add = [port for port in cd_ports if port not in current_ports]
+ if len(ports_to_add) > 0:
+ if not self.module.check_mode:
+ for port in ports_to_add:
+ self.add_portset_ports(port)
+ self.na_helper.changed = True
+
+ if self.parameters['state'] == 'absent':
+ ports_to_remove = [port for port in cd_ports if port in current_ports]
+ if len(ports_to_remove) > 0:
+ if not self.module.check_mode:
+ for port in ports_to_remove:
+ self.remove_portset_ports(port)
+ self.na_helper.changed = True
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_ports")
+ if self.parameters['resource_type'] == 'broadcast_domain':
+ self.modify_broadcast_domain_ports()
+ elif self.parameters['resource_type'] == 'portset':
+ self.modify_portset_ports()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ portset_obj = NetAppOntapPorts()
+ portset_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py
new file mode 100644
index 00000000..e2511f17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP Create/Delete portset
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete ONTAP portset, modify ports in a portset.
+ - Modify type(protocol) is not supported in ONTAP.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_portset
+options:
+ state:
+ description:
+      - Whether the specified portset should exist or not.
+ default: present
+ type: str
+ vserver:
+ required: true
+ description:
+ - Name of the SVM.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the port set to create.
+ type: str
+ type:
+ description:
+ - Required for create.
+ - Protocols accepted for this portset.
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+ force:
+ description:
+ - If 'false' or not specified, the request will fail if there are any igroups bound to this portset.
+ - If 'true', forcibly destroy the portset, even if there are existing igroup bindings.
+ type: bool
+ default: False
+ ports:
+ description:
+ - Specify the ports associated with this portset. Should be comma separated.
+ - It represents the expected state of a list of ports at any time, and replaces the current value of ports.
+ - Adds a port if it is specified in expected state but not in current state.
+ - Deletes a port if it is in current state but not in expected state.
+ type: list
+ elements: str
+version_added: 2.8.0
+
+'''
+
+EXAMPLES = """
+ - name: Create Portset
+ na_ontap_portset:
+ state: present
+ vserver: vserver_name
+ name: portset_name
+ ports: a1
+ type: "{{ protocol type }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+
+ - name: Modify ports in portset
+ na_ontap_portset:
+ state: present
+ vserver: vserver_name
+ name: portset_name
+ ports: a1,a2
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+
+ - name: Delete Portset
+ na_ontap_portset:
+ state: absent
+ vserver: vserver_name
+ name: portset_name
+ force: True
+ type: "{{ protocol type }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPPortset(object):
+ """
+ Methods to create or delete portset
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present'),
+ vserver=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ type=dict(required=False, type='str', choices=[
+ 'fcp', 'iscsi', 'mixed']),
+ force=dict(required=False, type='bool', default=False),
+ ports=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def portset_get_iter(self):
+ """
+ Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters
+ :return: NaElement object for portset-get-iter with query
+ """
+ portset_get = netapp_utils.zapi.NaElement('portset-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ portset_info = netapp_utils.zapi.NaElement('portset-info')
+ portset_info.add_new_child('vserver', self.parameters['vserver'])
+ portset_info.add_new_child('portset-name', self.parameters['name'])
+ query.add_child_elem(portset_info)
+ portset_get.add_child_elem(query)
+ return portset_get
+
+ def portset_get(self):
+ """
+ Get current portset info
+ :return: Dictionary of current portset details if query successful, else return None
+ """
+ portset_get_iter = self.portset_get_iter()
+ result, portset_info = None, dict()
+ try:
+ result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching portset %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ # return portset details
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info')
+ portset_info['type'] = portset_get_info.get_child_content('portset-type')
+ if int(portset_get_info.get_child_content('portset-port-total')) > 0:
+ ports = portset_get_info.get_child_by_name('portset-port-info')
+ portset_info['ports'] = [port.get_content() for port in ports.get_children()]
+ else:
+ portset_info['ports'] = []
+ return portset_info
+ return None
+
+ def create_portset(self):
+ """
+ Create a portset
+ """
+ if self.parameters.get('type') is None:
+ self.module.fail_json(msg='Error: Missing required parameter for create (type)')
+ portset_info = netapp_utils.zapi.NaElement("portset-create")
+ portset_info.add_new_child("portset-name", self.parameters['name'])
+ portset_info.add_new_child("portset-type", self.parameters['type'])
+ try:
+ self.server.invoke_successfully(
+ portset_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating portset %s: %s" %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_portset(self):
+ """
+ Delete a portset
+ """
+ portset_info = netapp_utils.zapi.NaElement("portset-destroy")
+ portset_info.add_new_child("portset-name", self.parameters['name'])
+ if self.parameters.get('force'):
+ portset_info.add_new_child("force", str(self.parameters['force']))
+ try:
+ self.server.invoke_successfully(
+ portset_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error deleting portset %s: %s" %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_ports(self, ports):
+ """
+ Removes all existing ports from portset
+ :return: None
+ """
+ for port in ports:
+ self.modify_port(port, 'portset-remove', 'removing')
+
+ def add_ports(self):
+ """
+ Add the list of ports to portset
+ :return: None
+ """
+ # don't add if ports is empty string
+ if self.parameters.get('ports') == [''] or self.parameters.get('ports') is None:
+ return
+ for port in self.parameters['ports']:
+ self.modify_port(port, 'portset-add', 'adding')
+
+    def modify_port(self, port, zapi, action):
+        """
+        Add or remove a port to/from a portset
+        """
+        port = port.strip()  # str.strip() returns a new string (str is immutable) - must rebind or spaces survive
+        options = {'portset-name': self.parameters['name'],
+                   'portset-port-name': port}
+
+        portset_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+        try:
+            self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error %s port in portset %s: %s' % (action, self.parameters['name'],
+                                                                           to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Applies action from playbook
+        """
+        netapp_utils.ems_log_event("na_ontap_portset", self.server)  # log under this module's name, not na_ontap_autosupport
+        current, modify = self.portset_get(), None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            if self.parameters.get('type') and self.parameters['type'] != current['type']:
+                self.module.fail_json(msg="modify protocol(type) not supported and %s already exists in vserver %s under different type" %
+                                      (self.parameters['name'], self.parameters['vserver']))
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_portset()
+                    self.add_ports()
+                elif cd_action == 'delete':
+                    self.delete_portset()
+                elif modify:
+                    self.remove_ports(current['ports'])
+                    self.add_ports()
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ portset_obj = NetAppONTAPPortset()
+ portset_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py
new file mode 100644
index 00000000..28b5a773
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_qos_adaptive_policy_group
+short_description: NetApp ONTAP Adaptive Quality of Service policy group.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@joshedmonds) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, modify, or rename an Adaptive QoS policy group on NetApp ONTAP. Module is based on the standard QoS policy group module.
+
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified policy group should exist or not.
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the policy group to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the existing policy group to be renamed to name.
+ type: str
+
+ absolute_min_iops:
+ description:
+ - Absolute minimum IOPS defined by this policy.
+ type: str
+
+ expected_iops:
+ description:
+ - Minimum expected IOPS defined by this policy.
+ type: str
+
+ peak_iops:
+ description:
+ - Maximum possible IOPS per allocated or used TB|GB.
+ type: str
+
+ peak_iops_allocation:
+ choices: ['allocated_space', 'used_space']
+ description:
+ - Whether peak_iops is specified by allocated or used space.
+ default: 'used_space'
+ type: str
+
+ force:
+ type: bool
+ default: False
+ description:
+ - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
+'''
+
+EXAMPLES = """
+ - name: create adaptive qos policy group
+ na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 100IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: allocated_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify adaptive qos policy group expected iops
+ na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 125IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: allocated_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify adaptive qos policy group peak iops allocation
+ na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 125IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: used_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: delete qos policy group
+ na_ontap_qos_adaptive_policy_group:
+ state: absent
+ name: aq_policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapAdaptiveQosPolicyGroup(object):
+ """
+ Create, delete, modify and rename a policy group.
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap qos policy group class.
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ absolute_min_iops=dict(required=False, type='str'),
+ expected_iops=dict(required=False, type='str'),
+ peak_iops=dict(required=False, type='str'),
+ peak_iops_allocation=dict(choices=['allocated_space', 'used_space'], default='used_space'),
+ force=dict(required=False, type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module)
+
+ def get_policy_group(self, policy_group_name=None):
+ """
+ Return details of a policy group.
+ :param policy_group_name: policy group name
+ :return: policy group details.
+ :rtype: dict.
+ """
+ if policy_group_name is None:
+ policy_group_name = self.parameters['name']
+ policy_group_get_iter = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-get-iter')
+ policy_group_info = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-info')
+ policy_group_info.add_new_child('policy-group', policy_group_name)
+ policy_group_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(policy_group_info)
+ policy_group_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(policy_group_get_iter, True)
+ policy_group_detail = None
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-adaptive-policy-group-info')
+
+ policy_group_detail = {
+ 'name': policy_info.get_child_content('policy-group'),
+ 'vserver': policy_info.get_child_content('vserver'),
+ 'absolute_min_iops': policy_info.get_child_content('absolute-min-iops'),
+ 'expected_iops': policy_info.get_child_content('expected-iops'),
+ 'peak_iops': policy_info.get_child_content('peak-iops'),
+ 'peak_iops_allocation': policy_info.get_child_content('peak-iops-allocation')
+ }
+ return policy_group_detail
+
+ def create_policy_group(self):
+ """
+ create a policy group name.
+ """
+ policy_group = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-create')
+ policy_group.add_new_child('policy-group', self.parameters['name'])
+ policy_group.add_new_child('vserver', self.parameters['vserver'])
+ if self.parameters.get('absolute_min_iops'):
+ policy_group.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
+ if self.parameters.get('expected_iops'):
+ policy_group.add_new_child('expected-iops', self.parameters['expected_iops'])
+ if self.parameters.get('peak_iops'):
+ policy_group.add_new_child('peak-iops', self.parameters['peak_iops'])
+ if self.parameters.get('peak_iops_allocation'):
+ policy_group.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
+ try:
+ self.server.invoke_successfully(policy_group, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating adaptive qos policy group %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_policy_group(self, policy_group=None):
+ """
+ delete an existing policy group.
+ :param policy_group: policy group name.
+ """
+ if policy_group is None:
+ policy_group = self.parameters['name']
+ policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-delete')
+ policy_group_obj.add_new_child('policy-group', policy_group)
+ if self.parameters.get('force'):
+ policy_group_obj.add_new_child('force', str(self.parameters['force']))
+ try:
+ self.server.invoke_successfully(policy_group_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting adaptive qos policy group %s: %s' %
+ (policy_group, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_policy_group(self):
+ """
+ Modify policy group.
+ """
+ policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-modify')
+ policy_group_obj.add_new_child('policy-group', self.parameters['name'])
+ if self.parameters.get('absolute_min_iops'):
+ policy_group_obj.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
+ if self.parameters.get('expected_iops'):
+ policy_group_obj.add_new_child('expected-iops', self.parameters['expected_iops'])
+ if self.parameters.get('peak_iops'):
+ policy_group_obj.add_new_child('peak-iops', self.parameters['peak_iops'])
+ if self.parameters.get('peak_iops_allocation'):
+ policy_group_obj.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
+ try:
+ self.server.invoke_successfully(policy_group_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying adaptive qos policy group %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_policy_group(self):
+ """
+ Rename policy group name.
+ """
+ rename_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-rename')
+ rename_obj.add_new_child('new-name', self.parameters['name'])
+ rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
+ try:
+ self.server.invoke_successfully(rename_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming adaptive qos policy group %s: %s' %
+ (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_helper(self, modify):
+ """
+ helper method to modify policy group.
+ :param modify: modified attributes.
+ """
+ for attribute in modify.keys():
+ if attribute in ['absolute_min_iops', 'expected_iops', 'peak_iops', 'peak_iops_allocation']:
+ self.modify_policy_group()
+
+    def apply(self):
+        """
+        Run module based on playbook
+        """
+        self.autosupport_log("na_ontap_qos_adaptive_policy_group")  # was logging the non-adaptive module's name
+        current = self.get_policy_group()
+        rename, cd_action = None, None
+        if self.parameters.get('from_name'):
+            rename = self.na_helper.is_rename_action(self.get_policy_group(self.parameters['from_name']), current)
+        else:
+            cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if rename:
+                    self.rename_policy_group()
+                if cd_action == 'create':
+                    self.create_policy_group()
+                elif cd_action == 'delete':
+                    self.delete_policy_group()
+                elif modify:
+                    self.modify_helper(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+ def autosupport_log(self, event_name):
+ """
+ Create a log event against the provided vserver
+ """
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ netapp_utils.ems_log_event(event_name, server)
+
+
+def main():
+ '''Apply vserver operations from playbook'''
+ qos_policy_group = NetAppOntapAdaptiveQosPolicyGroup()
+ qos_policy_group.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py
new file mode 100644
index 00000000..a74bc987
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_qos_policy_group
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_qos_policy_group
+short_description: NetApp ONTAP manage policy group in Quality of Service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, modify, or rename QoS policy group on NetApp ONTAP.
+
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified policy group should exist or not.
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the policy group to manage.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the existing policy group to be renamed to name.
+ type: str
+
+ max_throughput:
+ description:
+ - Maximum throughput defined by this policy.
+ type: str
+
+ min_throughput:
+ description:
+ - Minimum throughput defined by this policy.
+ type: str
+
+ is_shared:
+ description:
+ - Whether the SLOs of the policy group are shared between the workloads or if the SLOs are applied separately to each workload.
+ type: bool
+ version_added: 20.12.0
+
+ force:
+ type: bool
+ default: False
+ description:
+ - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
+'''
+
+EXAMPLES = """
+ - name: create qos policy group
+ na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ max_throughput: 800KB/s,800iops
+ min_throughput: 100iops
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify qos policy group max throughput
+ na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ max_throughput: 900KB/s,800iops
+ min_throughput: 100iops
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: delete qos policy group
+ na_ontap_qos_policy_group:
+ state: absent
+ name: policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapQosPolicyGroup(object):
    """
    Create, delete, modify and rename a policy group.

    Uses the ZAPI qos-policy-group-* APIs over a cluster-scoped
    connection; the target vserver is carried inside each request.
    """
    def __init__(self):
        """
        Initialize the Ontap qos policy group class: build the argument
        spec, create the AnsibleModule and open the ZAPI connection
        (fails early when the NetApp-Lib package is missing).
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            max_throughput=dict(required=False, type='str'),
            min_throughput=dict(required=False, type='str'),
            is_shared=dict(required=False, type='bool'),
            force=dict(required=False, type='bool', default=False)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            # cluster-scoped connection: each ZAPI element names the vserver explicitly
            self.server = netapp_utils.setup_na_ontap_zapi(
                module=self.module)

    def get_policy_group(self, policy_group_name=None):
        """
        Return details of a policy group.
        :param policy_group_name: policy group name; defaults to self.parameters['name'] when None.
        :return: policy group details (name, vserver, max/min throughput, is_shared),
            or None when no matching record exists.
        :rtype: dict or None
        """
        if policy_group_name is None:
            policy_group_name = self.parameters['name']
        policy_group_get_iter = netapp_utils.zapi.NaElement('qos-policy-group-get-iter')
        policy_group_info = netapp_utils.zapi.NaElement('qos-policy-group-info')
        policy_group_info.add_new_child('policy-group', policy_group_name)
        policy_group_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(policy_group_info)
        policy_group_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(policy_group_get_iter, True)
        policy_group_detail = None

        # (policy-group, vserver) identifies at most one record
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
            policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-policy-group-info')

            policy_group_detail = {
                'name': policy_info.get_child_content('policy-group'),
                'vserver': policy_info.get_child_content('vserver'),
                'max_throughput': policy_info.get_child_content('max-throughput'),
                'min_throughput': policy_info.get_child_content('min-throughput'),
                # ZAPI reports booleans as 'true'/'false' strings - convert to python bool
                'is_shared': self.na_helper.get_value_for_bool(True, policy_info.get_child_content('is-shared'))
            }
        return policy_group_detail

    def create_policy_group(self):
        """
        create a policy group name.

        Optional attributes (max/min throughput, is_shared) are only sent
        when present in the module parameters.
        """
        policy_group = netapp_utils.zapi.NaElement('qos-policy-group-create')
        policy_group.add_new_child('policy-group', self.parameters['name'])
        policy_group.add_new_child('vserver', self.parameters['vserver'])
        if self.parameters.get('max_throughput'):
            policy_group.add_new_child('max-throughput', self.parameters['max_throughput'])
        if self.parameters.get('min_throughput'):
            policy_group.add_new_child('min-throughput', self.parameters['min_throughput'])
        if self.parameters.get('is_shared') is not None:
            # convert the python bool back to the 'true'/'false' string ZAPI expects
            policy_group.add_new_child('is-shared', self.na_helper.get_value_for_bool(False, self.parameters['is_shared']))
        try:
            self.server.invoke_successfully(policy_group, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_policy_group(self, policy_group=None):
        """
        delete an existing policy group.
        :param policy_group: policy group name; defaults to self.parameters['name'] when None.
        """
        if policy_group is None:
            policy_group = self.parameters['name']
        policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-delete')
        policy_group_obj.add_new_child('policy-group', policy_group)
        if self.parameters.get('force'):
            # force also deletes the workloads associated with the policy group
            # NOTE(review): str(True) yields 'True' while is-shared above uses lowercase
            # 'true'/'false' - confirm ONTAP accepts the capitalized form here
            policy_group_obj.add_new_child('force', str(self.parameters['force']))
        try:
            self.server.invoke_successfully(policy_group_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting qos policy group %s: %s' %
                                  (policy_group, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_policy_group(self):
        """
        Modify policy group.

        Sends whichever of max/min throughput are set in the parameters
        in a single qos-policy-group-modify call.
        """
        policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-modify')
        policy_group_obj.add_new_child('policy-group', self.parameters['name'])
        if self.parameters.get('max_throughput'):
            policy_group_obj.add_new_child('max-throughput', self.parameters['max_throughput'])
        if self.parameters.get('min_throughput'):
            policy_group_obj.add_new_child('min-throughput', self.parameters['min_throughput'])
        try:
            self.server.invoke_successfully(policy_group_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def rename_policy_group(self):
        """
        Rename policy group from self.parameters['from_name'] to self.parameters['name'].
        """
        rename_obj = netapp_utils.zapi.NaElement('qos-policy-group-rename')
        rename_obj.add_new_child('new-name', self.parameters['name'])
        rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
        try:
            self.server.invoke_successfully(rename_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming qos policy group %s: %s' %
                                  (self.parameters['from_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_helper(self, modify):
        """
        helper method to modify policy group.
        :param modify: modified attributes, as reported by get_modified_attributes().
        """
        if 'is_shared' in modify:
            # is_shared is only accepted at creation time: fail rather than silently ignore the request
            self.module.fail_json(msg='Error cannot modify is_shared attribute.')
        if any([attribute in modify for attribute in ['max_throughput', 'min_throughput']]):
            self.modify_policy_group()

    def apply(self):
        """
        Run module based on playbook: decide between rename, create,
        delete and modify, then perform the action unless in check mode.
        """
        self.asup_log_for_cserver("na_ontap_qos_policy_group")
        current = self.get_policy_group()
        rename, cd_action = None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # create policy by renaming an existing one
            old_policy = self.get_policy_group(self.parameters['from_name'])
            rename = self.na_helper.is_rename_action(old_policy, current)
            if rename:
                # treat the rename source as the current state so a combined
                # rename+modify is detected below
                current = old_policy
                cd_action = None
            if rename is None:
                self.module.fail_json(msg='Error renaming qos policy group: cannot find %s' %
                                      self.parameters['from_name'])
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if rename:
                self.rename_policy_group()
            if cd_action == 'create':
                self.create_policy_group()
            elif cd_action == 'delete':
                self.delete_policy_group()
            elif modify:
                self.modify_helper(modify)
        self.module.exit_json(changed=self.na_helper.changed)

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)
+
+
def main():
    '''Build the QoS policy group module object and apply the requested state.'''
    NetAppOntapQosPolicyGroup().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py
new file mode 100644
index 00000000..9d05d75b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_qtree
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_qtree
+
+short_description: NetApp ONTAP manage qtrees
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified qtree should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the qtree to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the qtree to be renamed.
+ version_added: 2.7.0
+ type: str
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the qtree should exist on.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ export_policy:
+ description:
+ - The name of the export policy to apply.
+ version_added: 2.9.0
+ type: str
+
+ security_style:
+ description:
+ - The security style for the qtree.
+ choices: ['unix', 'ntfs', 'mixed']
+ type: str
+ version_added: 2.9.0
+
+ oplocks:
+ description:
+ - Whether the oplocks should be enabled or not for the qtree.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+
+ unix_permissions:
+ description:
+ - File permissions bits of the qtree.
+ version_added: 2.9.0
+ type: str
+
+ force_delete:
+ description:
+ - Whether the qtree should be deleted even if files still exist.
+    - Note that the default of true reflects the REST API behavior.
+ - a value of false is not supported with REST.
+ type: bool
+ default: true
+ version_added: 20.8.0
+
+ wait_for_completion:
+ description:
+ - Only applicable for REST. When using ZAPI, the deletion is always synchronous.
+ - Deleting a qtree may take time if many files need to be deleted.
+ - Set this parameter to 'true' for synchronous execution during delete.
+ - Set this parameter to 'false' for asynchronous execution.
+ - For asynchronous, execution exits as soon as the request is sent, and the qtree is deleted in background.
+ type: bool
+ default: true
+ version_added: 2.9.0
+
+ time_out:
+ description:
+ - Maximum time to wait for qtree deletion in seconds when wait_for_completion is True.
+ - Error out if task is not completed in defined time.
+ - Default is set to 3 minutes.
+ default: 180
+ type: int
+ version_added: 2.9.0
+'''
+
+EXAMPLES = """
+- name: Create Qtrees
+ na_ontap_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ export_policy: policyName
+ security_style: mixed
+ oplocks: disabled
+ unix_permissions:
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename Qtrees
+ na_ontap_qtree:
+ state: present
+ from_name: ansibleQTree_rename
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import datetime
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapQTree(object):
    '''Class with qtree operations, supporting both the REST and ZAPI interfaces'''

    def __init__(self):
        # flipped to True below when the cluster supports the REST API
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            flexvol_name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            export_policy=dict(required=False, type='str'),
            security_style=dict(required=False, type='str', choices=['unix', 'ntfs', 'mixed']),
            oplocks=dict(required=False, type='str', choices=['enabled', 'disabled']),
            unix_permissions=dict(required=False, type='str'),
            force_delete=dict(required=False, type='bool', default=True),
            wait_for_completion=dict(required=False, type='bool', default=True),
            time_out=dict(required=False, type='int', default=180),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['flexvol_name'])
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            # fall back to ZAPI, which requires the NetApp-Lib python package
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(
                    msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(
                    module=self.module, vserver=self.parameters['vserver'])

    def get_qtree(self, name=None):
        """
        Checks if the qtree exists.
        :param:
            name : qtree name; defaults to self.parameters['name'] when None
        :return:
            Details about the qtree: a REST record (including volume uuid and
            qtree id) or a dict of ZAPI fields.
            None if the qtree is not found.
        :rtype: dict or None
        """
        if name is None:
            name = self.parameters['name']
        if self.use_rest:
            api = "storage/qtrees"
            query = {'fields': 'export_policy,unix_permissions,security_style,volume',
                     'svm.name': self.parameters['vserver'],
                     'volume': self.parameters['flexvol_name'],
                     'name': name}
            message, error = self.rest_api.get(api, query)
            if error:
                self.module.fail_json(msg=error)
            if len(message.keys()) == 0:
                return None
            elif 'records' in message and len(message['records']) == 0:
                return None
            elif 'records' not in message:
                error = "Unexpected response in get_qtree from %s: %s" % (api, repr(message))
                self.module.fail_json(msg=error)
            return message['records'][0]
        else:
            qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
            query_details = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-info', **{'vserver': self.parameters['vserver'],
                                 'volume': self.parameters['flexvol_name'],
                                 'qtree': name})
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(query_details)
            qtree_list_iter.add_child_elem(query)
            result = self.server.invoke_successfully(qtree_list_iter,
                                                     enable_tunneling=True)
            return_q = None
            if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
                return_q = {'export_policy': result['attributes-list']['qtree-info']['export-policy'],
                            'oplocks': result['attributes-list']['qtree-info']['oplocks'],
                            'security_style': result['attributes-list']['qtree-info']['security-style']}

                # 'mode' (unix permissions) is not always reported; normalize to ''
                if result['attributes-list']['qtree-info'].get_child_by_name('mode'):
                    return_q['unix_permissions'] = result['attributes-list']['qtree-info']['mode']
                else:
                    return_q['unix_permissions'] = ''

            return return_q

    def create_qtree(self):
        """
        Create a qtree
        """
        if self.use_rest:
            api = "storage/qtrees"
            body = {'name': self.parameters['name'], 'volume': {'name': self.parameters['flexvol_name']},
                    'svm': {'name': self.parameters['vserver']}}
            if self.parameters.get('export_policy'):
                body['export_policy'] = self.parameters['export_policy']
            if self.parameters.get('security_style'):
                body['security_style'] = self.parameters['security_style']
            if self.parameters.get('unix_permissions'):
                body['unix_permissions'] = self.parameters['unix_permissions']
            # NOTE(review): oplocks is only sent on the ZAPI path below, not in the REST body
            __, error = self.rest_api.post(api, body)
            if error:
                self.module.fail_json(msg=error)
        else:
            options = {'qtree': self.parameters['name'], 'volume': self.parameters['flexvol_name']}
            if self.parameters.get('export_policy'):
                options['export-policy'] = self.parameters['export_policy']
            if self.parameters.get('security_style'):
                options['security-style'] = self.parameters['security_style']
            if self.parameters.get('oplocks'):
                options['oplocks'] = self.parameters['oplocks']
            if self.parameters.get('unix_permissions'):
                options['mode'] = self.parameters['unix_permissions']
            qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-create', **options)
            try:
                self.server.invoke_successfully(qtree_create,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error provisioning qtree %s: %s"
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_qtree(self, current):
        """
        Delete a qtree
        :param current: current qtree details; the REST path needs the volume uuid and qtree id.
        """
        if self.use_rest:
            uuid = current['volume']['uuid']
            qid = str(current['id'])
            api = "storage/qtrees/%s/%s" % (uuid, qid)
            # ask ONTAP to return within 3 seconds; a 'job' is included in the
            # response when the delete is still running in the background
            query = {'return_timeout': 3}
            response, error = self.rest_api.delete(api, params=query)
            if error:
                self.module.fail_json(msg=error)
            if 'job' in response and self.parameters['wait_for_completion']:
                message, error = self.rest_api.wait_on_job(response['job'], timeout=self.parameters['time_out'], increment=10)
                if error:
                    self.module.fail_json(msg="%s" % error)

        else:
            path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
            options = {'qtree': path}
            if self.parameters['force_delete']:
                # force deletes the qtree even if it still contains files
                options['force'] = "true"
            qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-delete', **options)

            try:
                self.server.invoke_successfully(qtree_delete,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(error)),
                                      exception=traceback.format_exc())

    def rename_qtree(self, current):
        """
        Rename a qtree
        :param current: current qtree details; only used by the REST path.
        """
        if self.use_rest:
            body = {'name': self.parameters['name']}
            uuid = current['volume']['uuid']
            qid = str(current['id'])
            api = "storage/qtrees/%s/%s" % (uuid, qid)
            dummy, error = self.rest_api.patch(api, body)
            if error:
                self.module.fail_json(msg=error)
        else:
            # ZAPI renames by full path, old -> new
            path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['from_name'])
            new_path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
            qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-rename', **{'qtree': path,
                                   'new-qtree-name': new_path})

            try:
                self.server.invoke_successfully(qtree_rename,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error renaming qtree %s: %s"
                                      % (self.parameters['from_name'], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_qtree(self, current):
        """
        Modify a qtree
        :param current: current qtree details; only used by the REST path.
        """
        if self.use_rest:
            now = datetime.datetime.now()
            body = {}
            if self.parameters.get('security_style'):
                body['security_style'] = self.parameters['security_style']
            if self.parameters.get('unix_permissions'):
                body['unix_permissions'] = self.parameters['unix_permissions']
            if self.parameters.get('export_policy'):
                body['export_policy'] = {'name': self.parameters['export_policy']}
            uuid = current['volume']['uuid']
            qid = str(current['id'])
            api = "storage/qtrees/%s/%s" % (uuid, qid)
            timeout = 120
            query = {'return_timeout': timeout}
            dummy, error = self.rest_api.patch(api, body, query)

            later = datetime.datetime.now()
            time_elapsed = later - now
            # modify will not return any error if return_timeout is 0, so we set it to 120 seconds as default
            if time_elapsed.seconds > (timeout - 1):
                self.module.fail_json(msg="Too long to run")
            if error:
                self.module.fail_json(msg=error)
        else:
            options = {'qtree': self.parameters['name'], 'volume': self.parameters['flexvol_name']}
            if self.parameters.get('export_policy'):
                options['export-policy'] = self.parameters['export_policy']
            if self.parameters.get('security_style'):
                options['security-style'] = self.parameters['security_style']
            if self.parameters.get('oplocks'):
                options['oplocks'] = self.parameters['oplocks']
            if self.parameters.get('unix_permissions'):
                options['mode'] = self.parameters['unix_permissions']
            qtree_modify = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-modify', **options)
            try:
                self.server.invoke_successfully(qtree_modify, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying qtree %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def apply(self):
        '''Call create/delete/modify/rename operations'''
        if not self.use_rest:
            # EMS autosupport logging is a ZAPI-only facility here
            netapp_utils.ems_log_event("na_ontap_qtree", self.server)
        current = self.get_qtree()
        rename, cd_action, modify = None, None, None
        if self.parameters.get('from_name'):
            # rename and create/delete are mutually exclusive
            from_qtree = self.get_qtree(self.parameters['from_name'])
            rename = self.na_helper.is_rename_action(from_qtree, current)
            if rename is None:
                self.module.fail_json(msg='Error renaming: qtree %s does not exist' % self.parameters['from_name'])
            if rename:
                current = from_qtree
        else:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            # qtree exists: compare each modifiable attribute with the request
            if self.parameters.get('security_style') and self.parameters['security_style'] != current['security_style']:
                modify = True
            if self.parameters.get('unix_permissions') and \
                    self.parameters['unix_permissions'] != str(current['unix_permissions']):
                modify = True
            # rest and zapi handle export policy differently
            if self.use_rest:
                if self.parameters.get('export_policy') and \
                        self.parameters['export_policy'] != current['export_policy']['name']:
                    modify = True
            else:
                if self.parameters.get('export_policy') and \
                        self.parameters['export_policy'] != current['export_policy']:
                    modify = True
        if self.use_rest and cd_action == 'delete' and not self.parameters['force_delete']:
            self.module.fail_json(msg='Error: force_delete option is not supported for REST, unless set to true.')

        if modify:
            self.na_helper.changed = True
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.create_qtree()
                elif cd_action == 'delete':
                    self.delete_qtree(current)
                else:
                    # rename and modify can both apply on the same run
                    if rename:
                        self.rename_qtree(current)
                    if modify:
                        self.modify_qtree(current)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    '''Build the qtree module object and apply the requested state.'''
    NetAppOntapQTree().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
new file mode 100644
index 00000000..527d8dff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_quota_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_quota_policy
+short_description: NetApp Ontap create, assign, rename or delete quota policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '19.11.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, assign, rename or delete the quota policy
+options:
+ state:
+ description:
+ - Whether the specified quota policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the quota policy.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies the quota policy name to create or rename to.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the existing quota policy to be renamed to name.
+ type: str
+
+ auto_assign:
+ description:
+ - when true, assign the policy to the vserver, whether it is newly created, renamed, or already exists.
+ - when true, the policy identified by name replaces the already assigned policy.
+ - when false, the policy is created if it does not already exist but is not assigned.
+ type: bool
+ default: true
+ version_added: 20.12.0
+"""
+
+EXAMPLES = """
+ - name: Create quota policy
+ na_ontap_quota_policy:
+ state: present
+ vserver: SVM1
+ name: ansible_policy
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Rename quota policy
+ na_ontap_quota_policy:
+ state: present
+ vserver: SVM1
+ name: new_ansible
+ from_name: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete quota policy
+ na_ontap_quota_policy:
+ state: absent
+ vserver: SVM1
+ name: ansible_policy
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.netapp.ontap.plugins.module_utils.zapis_svm as zapis_svm
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapQuotaPolicy(object):
    """
    Create, assign, rename or delete a quota policy

    Uses the ZAPI quota-policy-* APIs over a vserver-scoped connection;
    assignment to the vserver is done through zapis_svm helpers.
    """

    def __init__(self):
        """
        Initialize the ONTAP quota policy class: build the argument spec,
        create the AnsibleModule and open the vserver-scoped ZAPI connection.
        """

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            auto_assign=dict(required=False, type='bool', default=True),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['name', 'vserver'])
            ],
            supports_check_mode=True
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg='The python NetApp-Lib module is required')
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_quota_policy(self, policy_name=None):
        """
        Return the named quota policy when it exists on the vserver.
        :param policy_name: policy name; defaults to self.parameters['name'] when None.
        :return: dict holding the policy name, or None when not found.
        """

        if policy_name is None:
            policy_name = self.parameters['name']

        return_value = None
        quota_policy_get_iter = netapp_utils.zapi.NaElement('quota-policy-get-iter')
        quota_policy_info = netapp_utils.zapi.NaElement('quota-policy-info')
        quota_policy_info.add_new_child('policy-name', policy_name)
        quota_policy_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(quota_policy_info)
        quota_policy_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(quota_policy_get_iter, True)
            if result.get_child_by_name('attributes-list'):
                quota_policy_attributes = result['attributes-list']['quota-policy-info']
                return_value = {
                    'name': quota_policy_attributes['policy-name']
                }
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching quota policy %s: %s' % (policy_name, to_native(error)),
                                  exception=traceback.format_exc())
        return return_value

    def create_quota_policy(self):
        """
        Creates a new quota policy
        """
        quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-create")
        quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
        quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
        try:
            self.server.invoke_successfully(quota_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating quota policy %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_quota_policy(self):
        """
        Deletes a quota policy
        """
        quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-delete")
        quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
        try:
            self.server.invoke_successfully(quota_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting quota policy %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def rename_quota_policy(self):
        """
        Rename a quota policy, from self.parameters['from_name'] to self.parameters['name']
        """
        quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-rename")
        quota_policy_obj.add_new_child("policy-name", self.parameters['from_name'])
        quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
        quota_policy_obj.add_new_child("new-policy-name", self.parameters['name'])
        try:
            self.server.invoke_successfully(quota_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming quota policy %s: %s' % (self.parameters['from_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """
        Apply the requested state: create, rename or delete the policy,
        and optionally assign it to the vserver (auto_assign).
        """
        netapp_utils.ems_log_event("na_ontap_quota_policy", self.server)
        current = self.get_quota_policy()
        # rename and create are mutually exclusive
        rename, cd_action = None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # create policy by renaming it
            rename = self.na_helper.is_rename_action(self.get_quota_policy(self.parameters['from_name']), current)
            if rename is None:
                self.module.fail_json(msg='Error renaming quota policy: %s does not exist.' % self.parameters['from_name'])

        # check if policy should be assigned
        assign_policy = cd_action == 'create' and self.parameters['auto_assign']
        if cd_action is None and current and self.parameters['auto_assign']:
            # find out if the existing policy needs to be changed
            svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
            if svm.get('quota_policy') != self.parameters['name']:
                assign_policy = True
                self.na_helper.changed = True
        if cd_action == 'delete':
            # can't delete if already assigned
            svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
            if svm.get('quota_policy') == self.parameters['name']:
                self.module.fail_json(msg='Error policy %s cannot be deleted as it is assigned to the vserver %s' %
                                      (self.parameters['name'], self.parameters['vserver']))

        if self.na_helper.changed and not self.module.check_mode:
            if rename:
                self.rename_quota_policy()
            elif cd_action == 'create':
                self.create_quota_policy()
            elif cd_action == 'delete':
                self.delete_quota_policy()
            if assign_policy:
                zapis_svm.modify_vserver(self.server, self.module, self.parameters['vserver'], modify=dict(quota_policy=self.parameters['name']))
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """
    Build the NetApp Ontap quota policy module object and apply the requested state
    """
    NetAppOntapQuotaPolicy().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py
new file mode 100644
index 00000000..330a6986
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_quotas
+short_description: NetApp ONTAP Quotas
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Set/Modify/Delete quota on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified quota should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ required: true
+ description:
+ - Name of the vserver to use.
+ type: str
+ volume:
+ description:
+ - The name of the volume that the quota resides on.
+ required: true
+ type: str
+ quota_target:
+ description:
+ - The quota target of the type specified.
+ required: true
+ type: str
+ qtree:
+ description:
+ - Name of the qtree for the quota.
+ - For user or group rules, it can be the qtree name or "" if no qtree.
+ - For tree type rules, this field must be "".
+ default: ""
+ type: str
+ type:
+ description:
+ - The type of quota rule
+ choices: ['user', 'group', 'tree']
+ required: true
+ type: str
+ policy:
+ description:
+ - Name of the quota policy from which the quota rule should be obtained.
+ type: str
+ set_quota_status:
+ description:
+ - Whether the specified volume should have quota status on or off.
+ type: bool
+ perform_user_mapping:
+ description:
+ - Whether quota management will perform user mapping for the user specified in quota-target.
+ - User mapping can be specified only for a user quota rule.
+ type: bool
+ version_added: 20.12.0
+ file_limit:
+ description:
+ - The number of files that the target can have.
+ type: str
+ disk_limit:
+ description:
+ - The amount of disk space that is reserved for the target.
+ type: str
+ soft_file_limit:
+ description:
+ - The number of files the target would have to exceed before a message is logged and an SNMP trap is generated.
+ type: str
+ soft_disk_limit:
+ description:
+ - The amount of disk space the target would have to exceed before a message is logged and an SNMP trap is generated.
+ type: str
+ threshold:
+ description:
+ - The amount of disk space the target would have to exceed before a message is logged.
+ type: str
+ activate_quota_on_change:
+ description:
+ - Method to use to activate quota on a change.
+ choices: ['resize', 'reinitialize', 'none']
+ default: resize
+ type: str
+ version_added: 20.12.0
+'''
+
+EXAMPLES = """
+ - name: Add/Set quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Resize quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ activate_quota_on_change: resize
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Reinitialize quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ activate_quota_on_change: reinitialize
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: modify quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ threshold: 3
+ set_quota_status: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete quota
+ na_ontap_quotas:
+ state: absent
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPQuotas(object):
+    '''Class with quotas methods'''
+
+    def __init__(self):
+
+        # Shared ONTAP connection options (hostname, username, password, https, ...)
+        # come from the collection-wide host argument spec.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            volume=dict(required=True, type='str'),
+            quota_target=dict(required=True, type='str'),
+            qtree=dict(required=False, type='str', default=""),
+            type=dict(required=True, type='str', choices=['user', 'group', 'tree']),
+            policy=dict(required=False, type='str'),
+            set_quota_status=dict(required=False, type='bool'),
+            perform_user_mapping=dict(required=False, type='bool'),
+            file_limit=dict(required=False, type='str'),
+            disk_limit=dict(required=False, type='str'),
+            soft_file_limit=dict(required=False, type='str'),
+            soft_disk_limit=dict(required=False, type='str'),
+            threshold=dict(required=False, type='str'),
+            activate_quota_on_change=dict(required=False, type='str', choices=['resize', 'reinitialize', 'none'], default='resize')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        # set_parameters() drops options the user did not supply, so absence is
+        # tested with 'in' / .get() throughout this module.
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(
+                msg="the python NetApp-Lib module is required")
+        else:
+            # ZAPI connection tunneled to the target vserver.
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_quota_status(self):
+        """
+        Return details about the quota status
+        :param:
+            name : volume name
+        :return: status of the quota ('on', 'off', ...) for the volume. None if not found.
+        :rtype: dict
+        """
+        quota_status_get = netapp_utils.zapi.NaElement('quota-status')
+        quota_status_get.translate_struct({
+            'volume': self.parameters['volume']
+        })
+        try:
+            result = self.server.invoke_successfully(quota_status_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching quotas status info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result:
+            # the ZAPI reply carries the state in the 'status' element
+            return result['status']
+        return None
+
+    def get_quotas(self):
+        """
+        Get quota details
+        :return: dict of current quota limits if the rule exists, None otherwise
+        """
+        quota_get = netapp_utils.zapi.NaElement('quota-list-entries-iter')
+        query = {
+            'query': {
+                'quota-entry': {
+                    'volume': self.parameters['volume'],
+                    'quota-target': self.parameters['quota_target'],
+                    'quota-type': self.parameters['type'],
+                    'vserver': self.parameters['vserver']
+                }
+            }
+        }
+        quota_get.translate_struct(query)
+        if self.parameters.get('policy'):
+            # policy is an optional extra filter on the query
+            quota_get['query']['quota-entry'].add_new_child('policy', self.parameters['policy'])
+        try:
+            result = self.server.invoke_successfully(quota_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching quotas info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            # NOTE(review): only the first matching record is inspected — assumes the
+            # query above is specific enough to match a single quota rule.
+            return_values = {'volume': result['attributes-list']['quota-entry']['volume'],
+                             'file_limit': result['attributes-list']['quota-entry']['file-limit'],
+                             'disk_limit': result['attributes-list']['quota-entry']['disk-limit'],
+                             'soft_file_limit': result['attributes-list']['quota-entry']['soft-file-limit'],
+                             'soft_disk_limit': result['attributes-list']['quota-entry']['soft-disk-limit'],
+                             'threshold': result['attributes-list']['quota-entry']['threshold']}
+            # perform-user-mapping is not always present in the reply, hence safe_get
+            value = self.na_helper.safe_get(result, ['attributes-list', 'quota-entry', 'perform-user-mapping'])
+            if value is not None:
+                return_values['perform_user_mapping'] = self.na_helper.get_value_for_bool(True, value)
+            return return_values
+        return None
+
+    def quota_entry_set(self):
+        """
+        Adds a quota entry
+        """
+        options = {'volume': self.parameters['volume'],
+                   'quota-target': self.parameters['quota_target'],
+                   'quota-type': self.parameters['type'],
+                   'qtree': self.parameters['qtree']}
+
+        # only forward the optional limits the user actually supplied
+        if self.parameters.get('file_limit'):
+            options['file-limit'] = self.parameters['file_limit']
+        if self.parameters.get('disk_limit'):
+            options['disk-limit'] = self.parameters['disk_limit']
+        if self.parameters.get('perform_user_mapping') is not None:
+            # explicit None check: False is a valid value to send
+            options['perform-user-mapping'] = str(self.parameters['perform_user_mapping'])
+        if self.parameters.get('soft_file_limit'):
+            options['soft-file-limit'] = self.parameters['soft_file_limit']
+        if self.parameters.get('soft_disk_limit'):
+            options['soft-disk-limit'] = self.parameters['soft_disk_limit']
+        if self.parameters.get('threshold'):
+            options['threshold'] = self.parameters['threshold']
+        if self.parameters.get('policy'):
+            options['policy'] = self.parameters['policy']
+        set_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-set-entry', **options)
+        try:
+            self.server.invoke_successfully(set_entry, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error adding/modifying quota entry %s: %s'
+                                  % (self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def quota_entry_delete(self):
+        """
+        Deletes a quota entry
+        """
+        # the rule is identified by volume/target/type/qtree (plus optional policy)
+        options = {'volume': self.parameters['volume'],
+                   'quota-target': self.parameters['quota_target'],
+                   'quota-type': self.parameters['type'],
+                   'qtree': self.parameters['qtree']}
+        set_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-delete-entry', **options)
+        if self.parameters.get('policy'):
+            set_entry.add_new_child('policy', self.parameters['policy'])
+        try:
+            self.server.invoke_successfully(set_entry, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting quota entry %s: %s'
+                                  % (self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def quota_entry_modify(self, modify_attrs):
+        """
+        Modifies a quota entry
+        :param modify_attrs: dict of ZAPI (dashed) attribute names to change
+        """
+        options = {'volume': self.parameters['volume'],
+                   'quota-target': self.parameters['quota_target'],
+                   'quota-type': self.parameters['type'],
+                   'qtree': self.parameters['qtree']}
+        options.update(modify_attrs)
+        # re-send the user-supplied limits alongside the detected changes
+        if self.parameters.get('file_limit'):
+            options['file-limit'] = self.parameters['file_limit']
+        if self.parameters.get('disk_limit'):
+            options['disk-limit'] = self.parameters['disk_limit']
+        if self.parameters.get('perform_user_mapping') is not None:
+            options['perform-user-mapping'] = str(self.parameters['perform_user_mapping'])
+        if self.parameters.get('soft_file_limit'):
+            options['soft-file-limit'] = self.parameters['soft_file_limit']
+        if self.parameters.get('soft_disk_limit'):
+            options['soft-disk-limit'] = self.parameters['soft_disk_limit']
+        if self.parameters.get('threshold'):
+            options['threshold'] = self.parameters['threshold']
+        if self.parameters.get('policy'):
+            options['policy'] = str(self.parameters['policy'])
+        modify_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-modify-entry', **options)
+        try:
+            self.server.invoke_successfully(modify_entry, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying quota entry %s: %s'
+                                  % (self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def on_or_off_quota(self, status):
+        """
+        on or off quota
+        :param status: ZAPI call name, 'quota-on' or 'quota-off'
+        """
+        quota = netapp_utils.zapi.NaElement.create_node_with_children(
+            status, **{'volume': self.parameters['volume']})
+        try:
+            self.server.invoke_successfully(quota,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error setting %s for %s: %s'
+                                  % (status, self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def resize_quota(self):
+        """
+        resize quota
+        """
+        quota = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-resize', **{'volume': self.parameters['volume']})
+        try:
+            self.server.invoke_successfully(quota,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error setting %s for %s: %s'
+                                  % ('quota-resize', self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Apply action to quotas
+        """
+        netapp_utils.ems_log_event("na_ontap_quotas", self.server)
+        modify_quota_status = None
+        modify_quota = None
+        quota_status = None
+        current = self.get_quotas()
+        # decide between create / delete / (possible) modify
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None:
+            modify_quota = self.na_helper.get_modified_attributes(current, self.parameters)
+        # only query quota status when we may need to change or (re)activate it
+        if 'set_quota_status' in self.parameters or modify_quota:
+            quota_status = self.get_quota_status()
+        if 'set_quota_status' in self.parameters and quota_status is not None:
+            quota_status_action = self.na_helper.get_modified_attributes(
+                {'set_quota_status': True if quota_status == 'on' else False}, self.parameters)
+            if quota_status_action:
+                modify_quota_status = 'quota-on' if quota_status_action['set_quota_status'] else 'quota-off'
+        if modify_quota is not None and modify_quota_status is None and quota_status == 'on':
+            # do we need to resize or reinitialize:
+            if self.parameters['activate_quota_on_change'] in ['resize', 'reinitialize']:
+                modify_quota_status = self.parameters['activate_quota_on_change']
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                # check mode: report the change without touching the system
+                pass
+            else:
+                if cd_action == 'create':
+                    self.quota_entry_set()
+                elif cd_action == 'delete':
+                    self.quota_entry_delete()
+                elif modify_quota is not None:
+                    # ZAPI attribute names use dashes, module options use underscores
+                    for key in list(modify_quota):
+                        modify_quota[key.replace("_", "-")] = modify_quota.pop(key)
+                    self.quota_entry_modify(modify_quota)
+                if modify_quota_status in ['quota-off', 'quota-on']:
+                    self.on_or_off_quota(modify_quota_status)
+                elif modify_quota_status == 'resize':
+                    self.resize_quota()
+                elif modify_quota_status == 'reinitialize':
+                    # reinitialize = full off/on cycle
+                    self.on_or_off_quota('quota-off')
+                    time.sleep(10)  # status switch interval
+                    self.on_or_off_quota('quota-on')
+
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    '''Execute action'''
+    # apply() drives the idempotent create/modify/delete/status workflow.
+    quota_obj = NetAppONTAPQuotas()
+    quota_obj.apply()
+
+
+# Standard Ansible module entry point.
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py
new file mode 100644
index 00000000..3ddc8de7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_rest_cli
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Run system-cli commands on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_rest_cli
+short_description: NetApp ONTAP Run any cli command, the username provided needs to have console login permission.
+version_added: 2.9.0
+options:
+ command:
+ description:
+ - a string command.
+ required: true
+ type: str
+ verb:
+ description:
+ - a string indicating which api call to run
+ - OPTIONS is useful to know which verbs are supported by the REST API
+ choices: ['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']
+ required: true
+ type: str
+ params:
+ description:
+ - a dictionary of parameters to pass into the api call
+ type: dict
+ body:
+ description:
+ - a dictionary for info specification
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: run ontap rest cli command
+ na_ontap_rest_cli:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: 'version'
+ verb: 'GET'
+
+ - name: run ontap rest cli command
+ na_ontap_rest_cli:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: 'security/login/motd'
+ verb: 'PATCH'
+ params: {'vserver': 'ansibleSVM'}
+ body: {'message': 'test'}
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPCommandREST(object):
+    ''' calls a CLI command '''
+
+    def __init__(self):
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            command=dict(required=True, type='str'),
+            verb=dict(required=True, type='str', choices=['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']),
+            params=dict(required=False, type='dict', default={}),
+            body=dict(required=False, type='dict', default={})
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.rest_api = OntapRestAPI(self.module)
+        parameters = self.module.params
+        # set up state variables
+        self.command = parameters['command']
+        self.verb = parameters['verb']
+        self.params = parameters['params']
+        self.body = parameters['body']
+
+        # this module is REST-only; bail out early when REST is not available
+        if self.rest_api.is_rest():
+            self.use_rest = True
+        else:
+            self.module.fail_json(msg="use na_ontap_command for non-rest cli")
+
+    def run_command(self):
+        """Dispatch the CLI command through the REST private/cli passthrough.
+
+        :return: the REST response payload; fails the module on any REST error.
+        """
+        # ONTAP exposes CLI commands under the private/cli REST namespace
+        api = "private/cli/" + self.command
+
+        if self.verb == 'POST':
+            message, error = self.rest_api.post(api, self.body, self.params)
+        elif self.verb == 'GET':
+            message, error = self.rest_api.get(api, self.params)
+        elif self.verb == 'PATCH':
+            message, error = self.rest_api.patch(api, self.body, self.params)
+        elif self.verb == 'DELETE':
+            message, error = self.rest_api.delete(api, self.body, self.params)
+        elif self.verb == 'OPTIONS':
+            message, error = self.rest_api.options(api, self.params)
+        else:
+            # defensive: 'verb' choices restrict input to the five cases above,
+            # so this branch should be unreachable
+            self.module.fail_json(msg='Error running command %s:' % self.command,
+                                  exception=traceback.format_exc())
+
+        if error:
+            self.module.fail_json(msg=error)
+        return message
+
+    def apply(self):
+        ''' calls the command and returns raw output '''
+        # always reports changed=True: the module cannot know whether the
+        # arbitrary CLI command mutated state
+        changed = True
+        output = self.run_command()
+        self.module.exit_json(changed=changed, msg=output)
+
+
+def main():
+    """
+    Execute action from playbook
+    """
+    command = NetAppONTAPCommandREST()
+    command.apply()
+
+
+# Standard Ansible module entry point.
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
new file mode 100644
index 00000000..42d31c36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
@@ -0,0 +1,617 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" NetApp ONTAP Info using REST APIs """
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_rest_info
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: NetApp ONTAP information gatherer using REST APIs
+description:
+ - This module allows you to gather various information about ONTAP configuration using REST APIs
+version_added: 20.5.0
+
+options:
+ state:
+ type: str
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected
+ to a given subset. Either the info name or the Rest API can be given.
+ Possible values for this argument include
+ "aggregate_info" or "storage/aggregates",
+ "application_info" or "application/applications",
+ "application_template_info" or "application/templates",
+ "autosupport_config_info" or "support/autosupport",
+ "autosupport_messages_history" or "support/autosupport/messages",
+ "broadcast_domains_info" or "network/ethernet/broadcast-domains",
+ "cifs_home_directory_info" or "protocols/cifs/home-directory/search-paths",
+ "cifs_services_info" or "protocols/cifs/services",
+ "cifs_share_info" or "protocols/cifs/shares",
+ "cloud_targets_info" or "cloud/targets",
+ "cluster_chassis_info" or "cluster/chassis",
+ "cluster_jobs_info" or "cluster/jobs",
+ "cluster_metrics_info" or "cluster/metrics",
+ "cluster_node_info" or "cluster/nodes",
+ "cluster_peer_info" or "cluster/peers",
+ "cluster_schedules" or "cluster/schedules",
+ "cluster_software_download" or "cluster/software/download",
+ "cluster_software_history" or "cluster/software/history",
+ "cluster_software_packages" or "cluster/software/packages",
+ "disk_info" or "storage/disks",
+ "event_notification_info" or "support/ems/destinations",
+ "event_notification_destination_info" or "support/ems/destinations",
+ "initiator_groups_info" or "protocols/san/igroups",
+ "ip_interfaces_info" or "network/ip/interfaces",
+ "ip_routes_info" or "network/ip/routes",
+ "ip_service_policies" or "network/ip/service-policies",
+ "network_ipspaces_info" or "network/ipspaces",
+ "network_ports_info" or "network/ethernet/ports",
+ "ontap_system_version" or "cluster/software",
+ "san_fc_logins_info" or "network/fc/logins",
+ "san_fc_wppn-aliases" or "network/fc/wwpn-aliases",
+ "san_fcp_services" or "protocols/san/fcp/services",
+ "san_iscsi_credentials" or "protocols/san/iscsi/credentials",
+ "san_iscsi_services" or "protocols/san/iscsi/services",
+ "san_lun_maps" or "protocols/san/lun-maps",
+ "security_login_info" or "security/accounts",
+ "security_login_rest_role_info" or "security/roles",
+ "storage_flexcaches_info" or "storage/flexcache/flexcaches",
+ "storage_flexcaches_origin_info" or "storage/flexcache/origins",
+ "storage_luns_info" or "storage/luns",
+ "storage_NVMe_namespaces" or "storage/namespaces",
+ "storage_ports_info" or "storage/ports",
+ "storage_qos_policies" or "storage/qos/policies",
+ "storage_qtrees_config" or "storage/qtrees",
+ "storage_quota_reports" or "storage/quota/reports",
+ "storage_quota_policy_rules" or "storage/quota/rules",
+ "storage_shelves_config" or "storage/shelves",
+ "storage_snapshot_policies" or "storage/snapshot-policies",
+ "support_ems_config" or "support/ems",
+ "support_ems_events" or "support/ems/events",
+ "support_ems_filters" or "support/ems/filters",
+ "svm_dns_config_info" or "name-services/dns",
+ "svm_ldap_config_info" or "name-services/ldap",
+ "svm_name_mapping_config_info" or "name-services/name-mappings",
+ "svm_nis_config_info" or "name-services/nis",
+ "svm_peers_info" or "svm/peers",
+ "svm_peer-permissions_info" or "svm/peer-permissions",
+ "vserver_info" or "svm/svms",
+ "volume_info" or "storage/volumes",
+ Can specify a list of values to include a larger subset.
+ - REST APIs are supported with ONTAP 9.6 onwards.
+ default: "all"
+ max_records:
+ type: int
+ description:
+ - Maximum number of records returned in a single call.
+ default: 1024
+ fields:
+ type: list
+ elements: str
+ description:
+ - Request specific fields from subset.
+ '*' to return all the fields, one or more subsets are allowed.
+ '<list of fields>' to return specified fields, only one subset will be allowed.
+ - If the option is not present, return all the fields.
+ version_added: '20.6.0'
+ parameters:
+ description:
+ - Allows for any rest option to be passed in
+ type: dict
+ version_added: '20.7.0'
+'''
+
+EXAMPLES = '''
+- name: run ONTAP gather facts for vserver info
+ na_ontap_info_rest:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - vserver_info
+- name: run ONTAP gather facts for aggregate info and volume info
+ na_ontap_info_rest:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - aggregate_info
+ - volume_info
+- name: run ONTAP gather facts for all subsets
+ na_ontap_info_rest:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - all
+- name: run ONTAP gather facts for aggregate info and volume info with fields section
+ na_ontap_info_rest:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - '*'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - aggregate_info
+ - volume_info
+- name: run ONTAP gather facts for aggregate info with specified fields
+ na_ontap_info_rest:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - 'uuid'
+ - 'name'
+ - 'node'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - aggregate_info
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPGatherInfo(object):
+ '''Class with gather info methods'''
+
+    def __init__(self):
+        """
+        Parse arguments, setup state variables,
+        check parameters and ensure the requests module is installed
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(type='str', choices=['info'], default='info', required=False),
+            gather_subset=dict(default=['all'], type='list', elements='str', required=False),
+            max_records=dict(type='int', default=1024, required=False),
+            fields=dict(type='list', elements='str', required=False),
+            parameters=dict(type='dict', required=False)
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # fields forwarded with every subset GET (see get_subset_info)
+        self.fields = list()
+
+        self.rest_api = OntapRestAPI(self.module)
+
+    def validate_ontap_version(self):
+        """
+        Method to validate the ONTAP version
+
+        Queries the 'cluster' REST endpoint for its version field; fails the
+        module if the endpoint cannot be queried.
+        :return: the REST response containing the cluster version
+        """
+
+        api = 'cluster'
+        data = {'fields': ['version']}
+
+        ontap_version, error = self.rest_api.get(api, data)
+
+        if error:
+            self.module.fail_json(msg=error)
+
+        return ontap_version
+
+    def get_subset_info(self, gather_subset_info):
+        """
+        Gather ONTAP information for the given subset using REST APIs
+        Input for REST APIs call : (api, data)
+        :param gather_subset_info: dict with 'api_call' and an optional 'post' flag
+        return gathered_ontap_info (or the error message string for tolerated errors)
+        """
+
+        api = gather_subset_info['api_call']
+        # some subsets (e.g. metrocluster diagnostics) require a POST to
+        # generate the data before it can be fetched
+        if gather_subset_info.pop('post', False):
+            self.run_post(gather_subset_info)
+        data = {'max_records': self.parameters['max_records'], 'fields': self.fields}
+        # allow for passing in any additional rest api fields
+        if self.parameters.get('parameters'):
+            for each in self.parameters['parameters']:
+                data[each] = self.parameters['parameters'][each]
+
+        gathered_ontap_info, error = self.rest_api.get(api, data)
+
+        if error:
+            # Fail the module if error occurs from REST APIs call
+            if int(error.get('code', 0)) == 6:
+                self.module.fail_json(msg="%s user is not authorized to make %s api call" % (self.parameters.get('username'), api))
+            # if Aggr recommender can't make a recommendation it will fail with the following error code.
+            # We don't want to fail
+            elif int(error.get('code', 0)) == 19726344 and "No recommendation can be made for this cluster" in error.get('message'):
+                return error.get('message')
+            # If the API doesn't exist (using an older system) we don't want to fail
+            elif int(error.get('code', 0)) == 3:
+                return error.get('message')
+            else:
+                self.module.fail_json(msg=error)
+        else:
+            return gathered_ontap_info
+
+        # defensive: both branches above either return or fail_json,
+        # so this is not normally reached
+        return None
+
+    def run_post(self, gather_subset_info):
+        """Issue a POST for subsets flagged with 'post' and wait for the resulting job.
+
+        :param gather_subset_info: dict with 'api_call' naming the REST endpoint
+        """
+        api = gather_subset_info['api_call']
+        post_return, error = self.rest_api.post(api, None)
+        if error:
+            # NOTE(review): POST errors are silently ignored here; the
+            # follow-up GET in get_subset_info decides the final outcome.
+            return None
+        # the POST returns an async job; block until it completes
+        message, error = self.rest_api.wait_on_job(post_return['job'], increment=5)
+        if error:
+            self.module.fail_json(msg="%s" % error)
+
+    def get_next_records(self, api):
+        """
+        Gather next set of ONTAP information for the specified api
+        Input for REST APIs call : (api, data)
+        return gather_subset_info
+        """
+
+        # no query parameters: presumably 'api' is the next-page href returned
+        # by a previous call — confirm against the caller
+        data = {}
+        gather_subset_info, error = self.rest_api.get(api, data)
+
+        if error:
+            self.module.fail_json(msg=error)
+
+        return gather_subset_info
+
+ def convert_subsets(self):
+ """
+ Convert an info to the REST API
+ """
+ info_to_rest_mapping = {
+ "aggregate_info": "storage/aggregates",
+ "application_info": "application/applications",
+ "application_template_info": "application/templates",
+ "autosupport_config_info": "support/autosupport",
+ "autosupport_messages_history": "support/autosupport/messages",
+ "broadcast_domains_info": "network/ethernet/broadcast-domains",
+ "cifs_home_directory_info": "protocols/cifs/home-directory/search-paths",
+ "cifs_services_info": "protocols/cifs/services",
+ "cifs_share_info": "protocols/cifs/shares",
+ "cloud_targets_info": "cloud/targets",
+ "cluster_chassis_info": "cluster/chassis",
+ "cluster_jobs_info": "cluster/jobs",
+ "cluster_metrocluster_diagnostics": "cluster/metrocluster/diagnostics",
+ "cluster_metrics_info": "cluster/metrics",
+ "cluster_node_info": "cluster/nodes",
+ "cluster_peer_info": "cluster/peers",
+ "cluster_schedules": "cluster/schedules",
+ "cluster_software_download": "cluster/software/download",
+ "cluster_software_history": "cluster/software/history",
+ "cluster_software_packages": "cluster/software/packages",
+ "disk_info": "storage/disks",
+ "event_notification_info": "support/ems/destinations",
+ "event_notification_destination_info": "support/ems/destinations",
+ "initiator_groups_info": "protocols/san/igroups",
+ "ip_interfaces_info": "network/ip/interfaces",
+ "ip_routes_info": "network/ip/routes",
+ "ip_service_policies": "network/ip/service-policies",
+ "network_ipspaces_info": "network/ipspaces",
+ "network_ports_info": "network/ethernet/ports",
+ "ontap_system_version": "cluster/software",
+ "san_fc_logins_info": "network/fc/logins",
+ "san_fc_wppn-aliases": "network/fc/wwpn-aliases",
+ "san_fcp_services": "protocols/san/fcp/services",
+ "san_iscsi_credentials": "protocols/san/iscsi/credentials",
+ "san_iscsi_services": "protocols/san/iscsi/services",
+ "san_lun_maps": "protocols/san/lun-maps",
+ "security_login_info": "security/accounts",
+ "security_login_rest_role_info": "security/roles",
+ "storage_flexcaches_info": "storage/flexcache/flexcaches",
+ "storage_flexcaches_origin_info": "storage/flexcache/origins",
+ "storage_luns_info": "storage/luns",
+ "storage_NVMe_namespaces": "storage/namespaces",
+ "storage_ports_info": "storage/ports",
+ "storage_qos_policies": "storage/qos/policies",
+ "storage_qtrees_config": "storage/qtrees",
+ "storage_quota_reports": "storage/quota/reports",
+ "storage_quota_policy_rules": "storage/quota/rules",
+ "storage_shelves_config": "storage/shelves",
+ "storage_snapshot_policies": "storage/snapshot-policies",
+ "support_ems_config": "support/ems",
+ "support_ems_events": "support/ems/events",
+ "support_ems_filters": "support/ems/filters",
+ "svm_dns_config_info": "name-services/dns",
+ "svm_ldap_config_info": "name-services/ldap",
+ "svm_name_mapping_config_info": "name-services/name-mappings",
+ "svm_nis_config_info": "name-services/nis",
+ "svm_peers_info": "svm/peers",
+ "svm_peer-permissions_info": "svm/peer-permissions",
+ "vserver_info": "svm/svms",
+ "volume_info": "storage/volumes"
+ }
+        # Add REST API names as their info version; also make sure we don't add a duplicate
+ subsets = []
+ for subset in self.parameters['gather_subset']:
+ if subset in info_to_rest_mapping:
+ if info_to_rest_mapping[subset] not in subsets:
+ subsets.append(info_to_rest_mapping[subset])
+ else:
+ if subset not in subsets:
+ subsets.append(subset)
+ return subsets
+
+ def apply(self):
+ """
+ Perform pre-checks, call functions and exit
+ """
+
+ result_message = dict()
+
+ # Validating ONTAP version
+ self.validate_ontap_version()
+
+ # Defining gather_subset and appropriate api_call
+ get_ontap_subset_info = {
+ 'application/applications': {
+ 'api_call': 'application/applications',
+ },
+ 'application/templates': {
+ 'api_call': 'application/templates',
+ },
+ 'cloud/targets': {
+ 'api_call': 'cloud/targets',
+ },
+ 'cluster/chassis': {
+ 'api_call': 'cluster/chassis',
+ },
+ 'cluster/jobs': {
+ 'api_call': 'cluster/jobs',
+ },
+ 'cluster/metrocluster/diagnostics': {
+ 'api_call': 'cluster/metrocluster/diagnostics',
+ 'post': True
+ },
+ 'cluster/metrics': {
+ 'api_call': 'cluster/metrics',
+ },
+ 'cluster/nodes': {
+ 'api_call': 'cluster/nodes',
+ },
+ 'cluster/peers': {
+ 'api_call': 'cluster/peers',
+ },
+ 'cluster/schedules': {
+ 'api_call': 'cluster/schedules',
+ },
+ 'cluster/software': {
+ 'api_call': 'cluster/software',
+ },
+ 'cluster/software/download': {
+ 'api_call': 'cluster/software/download',
+ },
+ 'cluster/software/history': {
+ 'api_call': 'cluster/software/history',
+ },
+ 'cluster/software/packages': {
+ 'api_call': 'cluster/software/packages',
+ },
+ 'name-services/dns': {
+ 'api_call': 'name-services/dns',
+ },
+ 'name-services/ldap': {
+ 'api_call': 'name-services/ldap',
+ },
+ 'name-services/name-mappings': {
+ 'api_call': 'name-services/name-mappings',
+ },
+ 'name-services/nis': {
+ 'api_call': 'name-services/nis',
+ },
+ 'network/ethernet/broadcast-domains': {
+ 'api_call': 'network/ethernet/broadcast-domains',
+ },
+ 'network/ethernet/ports': {
+ 'api_call': 'network/ethernet/ports',
+ },
+ 'network/fc/logins': {
+ 'api_call': 'network/fc/logins',
+ },
+ 'network/fc/wwpn-aliases': {
+ 'api_call': 'network/fc/wwpn-aliases',
+ },
+ 'network/ip/interfaces': {
+ 'api_call': 'network/ip/interfaces',
+ },
+ 'network/ip/routes': {
+ 'api_call': 'network/ip/routes',
+ },
+ 'network/ip/service-policies': {
+ 'api_call': 'network/ip/service-policies',
+ },
+ 'network/ipspaces': {
+ 'api_call': 'network/ipspaces',
+ },
+ 'protocols/cifs/home-directory/search-paths': {
+ 'api_call': 'protocols/cifs/home-directory/search-paths',
+ },
+ 'protocols/cifs/services': {
+ 'api_call': 'protocols/cifs/services',
+ },
+ 'protocols/cifs/shares': {
+ 'api_call': 'protocols/cifs/shares',
+ },
+ 'protocols/san/fcp/services': {
+ 'api_call': 'protocols/san/fcp/services',
+ },
+ 'protocols/san/igroups': {
+ 'api_call': 'protocols/san/igroups',
+ },
+ 'protocols/san/iscsi/credentials': {
+ 'api_call': 'protocols/san/iscsi/credentials',
+ },
+ 'protocols/san/iscsi/services': {
+ 'api_call': 'protocols/san/iscsi/services',
+ },
+ 'protocols/san/lun-maps': {
+ 'api_call': 'protocols/san/lun-maps',
+ },
+ 'security/accounts': {
+ 'api_call': 'security/accounts',
+ },
+ 'security/roles': {
+ 'api_call': 'security/roles',
+ },
+ 'storage/aggregates': {
+ 'api_call': 'storage/aggregates',
+ },
+ 'storage/disks': {
+ 'api_call': 'storage/disks',
+ },
+ 'storage/flexcache/flexcaches': {
+ 'api_call': 'storage/flexcache/flexcaches',
+ },
+ 'storage/flexcache/origins': {
+ 'api_call': 'storage/flexcache/origins',
+ },
+ 'storage/luns': {
+ 'api_call': 'storage/luns',
+ },
+ 'storage/namespaces': {
+ 'api_call': 'storage/namespaces',
+ },
+ 'storage/ports': {
+ 'api_call': 'storage/ports',
+ },
+ 'storage/qos/policies': {
+ 'api_call': 'storage/qos/policies',
+ },
+ 'storage/qtrees': {
+ 'api_call': 'storage/qtrees',
+ },
+ 'storage/quota/reports': {
+ 'api_call': 'storage/quota/reports',
+ },
+ 'storage/quota/rules': {
+ 'api_call': 'storage/quota/rules',
+ },
+ 'storage/shelves': {
+ 'api_call': 'storage/shelves',
+ },
+ 'storage/snapshot-policies': {
+ 'api_call': 'storage/snapshot-policies',
+ },
+ 'storage/volumes': {
+ 'api_call': 'storage/volumes',
+ },
+ 'support/autosupport': {
+ 'api_call': 'support/autosupport',
+ },
+ 'support/autosupport/messages': {
+ 'api_call': 'support/autosupport/messages',
+ },
+ 'support/ems': {
+ 'api_call': 'support/ems',
+ },
+ 'support/ems/destinations': {
+ 'api_call': 'support/ems/destinations',
+ },
+ 'support/ems/events': {
+ 'api_call': 'support/ems/events',
+ },
+ 'support/ems/filters': {
+ 'api_call': 'support/ems/filters',
+ },
+ 'svm/peers': {
+ 'api_call': 'svm/peers',
+ },
+ 'svm/peer-permissions': {
+ 'api_call': 'svm/peer-permissions',
+ },
+ 'svm/svms': {
+ 'api_call': 'svm/svms',
+ }
+ }
+
+ if 'all' in self.parameters['gather_subset']:
+ # If all in subset list, get the information of all subsets
+ self.parameters['gather_subset'] = sorted(get_ontap_subset_info.keys())
+
+ length_of_subsets = len(self.parameters['gather_subset'])
+
+ if self.parameters.get('fields') is not None:
+ # If multiple fields specified to return, convert list to string
+ self.fields = ','.join(self.parameters.get('fields'))
+
+ if self.fields != '*' and length_of_subsets > 1:
+ # Restrict gather subsets to one subset if fields section is list_of_fields
+ self.module.fail_json(msg="Error: fields: %s, only one subset will be allowed." % self.parameters.get('fields'))
+ converted_subsets = self.convert_subsets()
+
+ for subset in converted_subsets:
+ try:
+ # Verify whether the supported subset passed
+ specified_subset = get_ontap_subset_info[subset]
+ except KeyError:
+ self.module.fail_json(msg="Specified subset %s is not found, supported subsets are %s" %
+ (subset, list(get_ontap_subset_info.keys())))
+
+ result_message[subset] = self.get_subset_info(specified_subset)
+
+ if result_message[subset] is not None:
+ if isinstance(result_message[subset], dict):
+ while result_message[subset]['_links'].get('next'):
+ # Get all the set of records if next link found in subset_info for the specified subset
+ next_api = result_message[subset]['_links']['next']['href']
+ gathered_subset_info = self.get_next_records(next_api.replace('/api', ''))
+
+ # Update the subset info for the specified subset
+ result_message[subset]['_links'] = gathered_subset_info['_links']
+ result_message[subset]['records'].extend(gathered_subset_info['records'])
+
+ # metrocluster doesn't have a records field, so we need to skip this
+ if result_message[subset].get('records') is not None:
+ # Getting total number of records
+ result_message[subset]['num_records'] = len(result_message[subset]['records'])
+
+ self.module.exit_json(changed='False', state=self.parameters['state'], ontap_info=result_message)
+
+
def main():
    """Entry point: build the info gatherer and run it."""
    gatherer = NetAppONTAPGatherInfo()
    gatherer.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py
new file mode 100644
index 00000000..9cc4b3ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Call a REST API on ONTAP.
+ - Cluster REST API are run using a cluster admin account.
+ - Vserver REST API can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver_) options).
+ - In case of success, a json dictionary is returned as C(response).
+ - In case of a REST API error, C(status_code), C(error_code), C(error_message) are set to help with diagnosing the issue,
+ - and the call is reported as an error ('failed').
+ - Other errors (eg connection issues) are reported as Ansible error.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_restit
+short_description: NetApp ONTAP Run any REST API on ONTAP
+version_added: "20.4.0"
+options:
+ api:
+ description:
+      - The REST API to call (e.g. I(cluster/software), I(svm/svms)).
+ required: true
+ type: str
+ method:
+ description:
+ - The REST method to use.
+ default: GET
+ type: str
+ query:
+ description:
+ - A list of dictionaries for the query parameters
+ type: dict
+ body:
+ description:
+ - A dictionary for the info parameter
+ type: dict
+ aliases: ['info']
+ vserver_name:
+ description:
+ - if provided, forces vserver tunneling. username identifies a cluster admin account.
+ type: str
+ vserver_uuid:
+ description:
+ - if provided, forces vserver tunneling. username identifies a cluster admin account.
+ type: str
+ hal_linking:
+ description:
+ - if true, HAL-encoded links are returned in the response.
+ default: false
+ type: bool
+'''
+
+EXAMPLES = """
+-
+ name: Ontap REST API
+ hosts: localhost
+ gather_facts: False
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ admin_ip }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ https: true
+ validate_certs: false
+ svm_login: &svm_login
+ hostname: "{{ svm_admin_ip }}"
+ username: "{{ svm_admin_username }}"
+ password: "{{ svm_admin_password }}"
+ https: true
+ validate_certs: false
+
+ tasks:
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: cluster/software
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: cluster/software
+ query:
+ fields: version
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: svm/svms
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: svm/svms
+ query:
+ fields: aggregates,cifs,nfs,uuid
+ query_fields: name
+ query: trident_svm
+ hal_linking: true
+ register: result
+ - debug: var=result
+
+ - name: run ontap REST API command as vsadmin
+ na_ontap_restit:
+ <<: *svm_login
+ api: svm/svms
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: storage/volumes
+ vserver_name: ansibleSVM
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+ - set_fact:
+ uuid: "{{ result.response.records | json_query(get_uuid) }}"
+ vars:
+ get_uuid: "[? name=='deleteme_ln1'].uuid"
+ - debug: var=uuid
+
+ - name: run ontap REST API command as DELETE method with vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: "storage/volumes/{{ uuid[0] }}"
+ method: DELETE
+ vserver_name: ansibleSVM
+ query:
+ return_timeout: 60
+ register: result
+ when: uuid|length == 1
+ - debug: var=result
+ - assert: { that: result.skipped|default(false) or result.status_code|default(404) == 200, quiet: True }
+
+ - name: run ontap REST API command as POST method with vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: storage/volumes
+ method: POST
+ vserver_name: ansibleSVM
+ query:
+ return_records: "true"
+ return_timeout: 60
+ body:
+ name: deleteme_ln1
+ aggregates:
+ - name: aggr1
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==201, quiet: True }
+
+ - name: run ontap REST API command as DELETE method with vserver tunneling
+ # delete test volume if present
+ na_ontap_restit:
+ <<: *login
+ api: "storage/volumes/{{ result.response.records[0].uuid }}"
+ method: DELETE
+ vserver_name: ansibleSVM
+ query:
+ return_timeout: 60
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+# error cases
+ - name: run ontap REST API command
+ na_ontap_restit:
+ <<: *login
+ api: unknown/endpoint
+ register: result
+ ignore_errors: True
+ - debug: var=result
+ - assert: { that: result.status_code==404, quiet: True }
+
+"""
+
+RETURN = """
+response:
+ description:
+ - If successful, a json dictionary returned by the REST API.
+ - If the REST API was executed but failed, an empty dictionary.
+ - Not present if the REST API call cannot be performed.
+ returned: On success
+ type: dict
+status_code:
+ description:
+ - The http status code.
+ returned: Always
+ type: str
+error_code:
+ description:
+ - If the REST API was executed but failed, the error code set by the REST API.
+ - Not present if successful, or if the REST API call cannot be performed.
+ returned: On error
+ type: str
+error_message:
+ description:
+ - If the REST API was executed but failed, the error message set by the REST API.
+ - Not present if successful, or if the REST API call cannot be performed.
+ returned: On error
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
class NetAppONTAPRestAPI(object):
    '''Generic pass-through module: run an arbitrary REST API call on ONTAP.'''

    def __init__(self):
        """Read the module options and set up the REST connection object."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            api=dict(required=True, type='str'),
            method=dict(required=False, type='str', default='GET'),
            query=dict(required=False, type='dict'),
            body=dict(required=False, type='dict', aliases=['info']),
            vserver_name=dict(required=False, type='str'),
            vserver_uuid=dict(required=False, type='str'),
            hal_linking=dict(required=False, type='bool', default=False),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=False
        )
        # Mirror every option into an attribute of the same name.
        params = self.module.params
        for option in ('api', 'method', 'query', 'body',
                       'vserver_name', 'vserver_uuid', 'hal_linking'):
            setattr(self, option, params[option])

        self.rest_api = OntapRestAPI(self.module)

    def run_api(self):
        '''Execute the REST call and return (status_code, response).

        On a REST error, fail the module, exposing status_code, response,
        error_message and error_code to help diagnose the problem.
        '''
        # TODO, log usage

        accept = 'application/hal+json' if self.hal_linking else 'application/json'
        status, response, error = self.rest_api.send_request(
            self.method, self.api, self.query, self.body,
            accept=accept,
            vserver_name=self.vserver_name, vserver_uuid=self.vserver_uuid)
        if error:
            error_message, error_code = None, None
            if isinstance(error, dict):
                # Pull the interesting fields out of the REST error payload.
                error_message = error.pop('message', None)
                error_code = error.pop('code', None)
                if not error:
                    # we exhausted the dictionary
                    error = 'check error_message and error_code for details.'
            else:
                error_message = error
            msg = "Error when calling '%s': %s" % (self.api, str(error))
            self.module.fail_json(msg=msg, status_code=status, response=response,
                                  error_message=error_message, error_code=error_code)

        return status, response

    def apply(self):
        '''Run the call and exit with its json output.'''
        status_code, response = self.run_api()
        self.module.exit_json(changed=True, status_code=status_code, response=response)
+
+
def main():
    """Instantiate the REST pass-through module and run the requested call."""
    NetAppONTAPRestAPI().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py
new file mode 100644
index 00000000..2561029e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_security_certificates
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_security_certificates
+short_description: NetApp ONTAP manage security certificates.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.7.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Install or delete security certificates on ONTAP. (Create and sign will come in a second iteration)
+
+options:
+
+ state:
+ description:
+ - Whether the specified security certificate should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ common_name:
+ description:
+ - Common name of the certificate.
+ - Required for create and install.
+ - If name is present, ignored for sign and delete.
+ - If name is absent or ignored, required for sign and delete.
+ type: str
+
+ name:
+ description:
+ - The unique name of the security certificate per SVM.
+ - This parameter is not supported for ONTAP 9.6 or 9.7, as the REST API does not support it.
+ - If present with ONTAP 9.6 or 9.7, it is ignored by default, see I(ignore_name_if_not_supported).
+ - It is strongly recommended to use name for newer releases of ONTAP.
+ type: str
+
+ svm:
+ description:
+ - The name of the SVM (vserver).
+ - If present, the certificate is installed in the SVM.
+ - If absent, the certificate is installed in the cluster.
+ type: str
+ aliases:
+ - vserver
+
+ type:
+ description:
+ - Type of certificate
+ - Required for create and install.
+ - If name is present, ignored for sign and delete.
+ - If name is absent or ignored, required for sign and delete.
+ choices: ['client', 'server', 'client_ca', 'server_ca', 'root_ca']
+ type: str
+
+ public_certificate:
+ description:
+ - Public key certificate in PEM format.
+ - Required when installing a certificate. Ignored otherwise.
+ type: str
+
+ private_key:
+ description:
+ - Private key certificate in PEM format.
+ - Required when installing a CA-signed certificate. Ignored otherwise.
+ type: str
+
+ signing_request:
+ description:
+ - If present, the certificate identified by name and svm is used to sign the request.
+ - A signed certificate is returned.
+ type: str
+
+ expiry_time:
+ description:
+ - Certificate expiration time. Specifying an expiration time is recommended when creating a certificate.
+ - Can be provided when signing a certificate.
+ type: str
+
+ key_size:
+ description:
+ - Key size of the certificate in bits. Specifying a strong key size is recommended when creating a certificate.
+ - Ignored for sign and delete.
+ type: int
+
+ hash_function:
+ description:
+ - Hashing function. Can be provided when creating a self-signed certificate or when signing a certificate.
+ - Allowed values for create and sign are sha256, sha224, sha384, sha512.
+ type: str
+
+ intermediate_certificates:
+ description:
+ - Chain of intermediate Certificates in PEM format.
+ - Only valid when installing a certificate.
+ type: list
+ elements: str
+
+ ignore_name_if_not_supported:
+ description:
+ - ONTAP 9.6 and 9.7 REST API does not support I(name).
+ - If set to true, no error is reported if I(name) is present, and I(name) is not used.
+ type: bool
+ default: true
+ version_added: '20.8.0'
+
+'''
+
+EXAMPLES = """
+- name: install certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ vserver }}"
+
+- name: create certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ expiry_time: P365DT # one year
+
+- name: sign certificate using newly created certificate
+ tags: sign_request
+ na_ontap_security_certificates:
+ # <<: *login
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+ signing_request: |
+ -----BEGIN CERTIFICATE REQUEST-----
+ MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
+ DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD
+ ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE
+ ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2
+ tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q
+ EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm
+ BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE
+ jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB
+ CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU
+ Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln
+ /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J
+ UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2
+ JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu
+ fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac
+ -----END CERTIFICATE REQUEST-----
+ expiry_time: P180DT
+
+- name: delete certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ state: absent
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+
+# For ONTAP 9.6 or 9.7, use common_name and type, in addition to, or in lieu of name
+- name: install certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ vserver }}"
+
+- name: create certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ expiry_time: P365DT # one year
+
+- name: sign certificate using newly created certificate
+ tags: sign_request
+ na_ontap_security_certificates:
+ # <<: *login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ signing_request: |
+ -----BEGIN CERTIFICATE REQUEST-----
+ MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
+ DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD
+ ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE
+ ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2
+ tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q
+ EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm
+ BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE
+ jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB
+ CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU
+ Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln
+ /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J
+ UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2
+ JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu
+ fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac
+ -----END CERTIFICATE REQUEST-----
+ expiry_time: P180DT
+
+- name: delete certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ state: absent
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+"""
+
+RETURN = """
+ontap_info:
+ description: Returns public_certificate when signing, empty for create, install, and delete.
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_info": {
+ "public_certificate": "-----BEGIN CERTIFICATE-----\n........-----END CERTIFICATE-----\n"
+ }
+ }'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
class NetAppOntapSecurityCertificates(object):
    ''' object initialize and class methods '''

    def __init__(self):
        # REST is mandatory for this module; flipped to True once is_rest() succeeds.
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            common_name=dict(required=False, type='str'),
            name=dict(required=False, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            type=dict(required=False, choices=['client', 'server', 'client_ca', 'server_ca', 'root_ca']),
            svm=dict(required=False, type='str', aliases=['vserver']),
            public_certificate=dict(required=False, type='str'),
            private_key=dict(required=False, type='str'),
            signing_request=dict(required=False, type='str'),
            expiry_time=dict(required=False, type='str'),
            key_size=dict(required=False, type='int'),
            hash_function=dict(required=False, type='str'),
            intermediate_certificates=dict(required=False, type='list', elements='str'),
            ignore_name_if_not_supported=dict(required=False, type='bool', default=True)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # A certificate is identified either by 'name', or by the
        # ('common_name', 'type') pair; at least one form must be given.
        if self.parameters.get('name') is None:
            if self.parameters.get('common_name') is None or self.parameters.get('type') is None:
                error = "'name' or ('common_name' and 'type') are required parameters."
                self.module.fail_json(msg=error)

        # ONTAP 9.6 and 9.7 do not support name.  We'll change this to True if we detect an issue.
        self.ignore_name_param = False

        # API should be used for ONTAP 9.6 or higher
        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_security_certificates'))

    def get_certificate(self):
        """
        Fetch uuid if certificate exists.
        NOTE: because of a bug in ONTAP 9.6 and 9.7, name is not supported. We are
        falling back to using common_name and type, but unicity is not guaranteed.
        :return:
            Dictionary if certificate with same name is found
            None if not found
        """
        error = "'name' or ('common_name', 'type') are required."
        # Try the 'name' lookup first; fall back to 'common_name' (+ 'type')
        # when 'name' is absent, or rejected by ONTAP 9.6/9.7 (see below).
        for key in ('name', 'common_name'):
            if self.parameters.get(key) is None:
                continue
            data = {'fields': 'uuid',
                    key: self.parameters[key],
                    }
            if self.parameters.get('svm') is not None:
                data['svm.name'] = self.parameters['svm']
            else:
                data['scope'] = 'cluster'
            if key == 'common_name':
                if self.parameters.get('type') is not None:
                    data['type'] = self.parameters['type']
                else:
                    # common_name alone is ambiguous; refuse the lookup.
                    error = "When using 'common_name', 'type' is required."
                    break

            api = "security/certificates"
            message, error = self.rest_api.get(api, data)
            if error:
                try:
                    name_not_supported_error = (key == 'name') and (error['message'] == 'Unexpected argument "name".')
                except (KeyError, TypeError):
                    name_not_supported_error = False
                if name_not_supported_error:
                    if self.parameters['ignore_name_if_not_supported'] and self.parameters.get('common_name') is not None:
                        # let's attempt a retry using common_name
                        self.ignore_name_param = True
                        continue
                    error = "ONTAP 9.6 and 9.7 do not support 'name'. Use 'common_name' and 'type' as a work-around."
            # report success, or any other error as is
            break

        if error:
            self.module.fail_json(msg='Error calling API: %s - %s' % (api, error))

        if len(message['records']) == 1:
            return message['records'][0]
        if len(message['records']) > 1:
            # More than one match: operating on an arbitrary one would be unsafe.
            error = 'Duplicate records with same common_name are preventing safe operations: %s' % repr(message)
            self.module.fail_json(msg=error)
        return None

    def create_or_install_certificate(self):
        """
        Create or install certificate
        :return: message (should be empty dict)
        """
        required_keys = ['type', 'common_name']
        optional_keys = ['public_certificate', 'private_key', 'expiry_time', 'key_size', 'hash_function']
        if not self.ignore_name_param:
            optional_keys.append('name')
        # special key: svm

        if not set(required_keys).issubset(set(self.parameters.keys())):
            self.module.fail_json(msg='Error creating or installing certificate: one or more of the following options are missing: %s'
                                      % (', '.join(required_keys)))

        data = dict()
        if self.parameters.get('svm') is not None:
            data['svm'] = {'name': self.parameters['svm']}
        for key in required_keys + optional_keys:
            if self.parameters.get(key) is not None:
                data[key] = self.parameters[key]
        api = "security/certificates"
        message, error = self.rest_api.post(api, data)
        if error:
            # Rewrite two confusing REST errors into something actionable.
            if self.parameters.get('svm') is None and error.get('target') == 'uuid':
                error['target'] = 'cluster'
            if error.get('message') == 'duplicate entry':
                error['message'] += '. Same certificate may already exist under a different name.'
            self.module.fail_json(msg="Error creating or installing certificate: %s" % error)
        return message

    def sign_certificate(self, uuid):
        """
        sign certificate
        :param uuid: uuid of the signing certificate (CA) on the cluster
        :return: a dictionary with key "public_certificate"
        """
        api = "security/certificates/%s/sign" % uuid
        data = {'signing_request': self.parameters['signing_request']}
        optional_keys = ['expiry_time', 'hash_function']
        for key in optional_keys:
            if self.parameters.get(key) is not None:
                data[key] = self.parameters[key]
        message, error = self.rest_api.post(api, data)
        if error:
            self.module.fail_json(msg="Error signing certificate: %s" % error)
        return message

    def delete_certificate(self, uuid):
        """
        Delete certificate
        :param uuid: uuid of the certificate to delete
        :return: message (should be empty dict)
        """
        api = "security/certificates/%s" % uuid
        message, error = self.rest_api.delete(api)
        if error:
            self.module.fail_json(msg="Error deleting certificate: %s" % error)
        return message

    def apply(self):
        """
        Apply action to create/install/sign/delete certificate
        :return: None
        """
        # TODO: add telemetry for REST

        current = self.get_certificate()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        message = None
        if self.parameters.get('signing_request') is not None:
            # Signing requires an existing certificate and is mutually
            # exclusive with create/install/delete.
            error = None
            if self.parameters['state'] == 'absent':
                error = "'signing_request' is not supported with 'state' set to 'absent'"
            elif current is None:
                scope = 'cluster' if self.parameters.get('svm') is None else "svm: %s" % self.parameters.get('svm')
                error = "signing certificate with name '%s' not found on %s" % (self.parameters.get('name'), scope)
            elif cd_action is not None:
                error = "'signing_request' is exclusive with other actions: create, install, delete"
            if error is not None:
                self.module.fail_json(msg=error)
            self.na_helper.changed = True

        if self.na_helper.changed:
            if self.module.check_mode:
                # check mode: report the pending change without performing it
                pass
            else:
                if cd_action == 'create':
                    message = self.create_or_install_certificate()
                elif cd_action == 'delete':
                    message = self.delete_certificate(current['uuid'])
                elif self.parameters.get('signing_request') is not None:
                    message = self.sign_certificate(current['uuid'])

        results = {'changed': self.na_helper.changed}
        if message:
            results['ontap_info'] = message
        self.module.exit_json(**results)
+
+
def main():
    """Build the certificate manager and apply the requested state."""
    manager = NetAppOntapSecurityCertificates()
    manager.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py
new file mode 100644
index 00000000..26e3b5e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_security_key_manager
+
+short_description: NetApp ONTAP security key manager.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Add or delete or setup key management on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified key manager should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ ip_address:
+ description:
+ - The IP address of the key management server.
+ required: true
+ type: str
+
+ tcp_port:
+ description:
+ - The TCP port on which the key management server listens for incoming connections.
+ default: 5696
+ type: int
+
+ node:
+ description:
+ - The node which key management server runs on.
+ type: str
+
+'''
+
+EXAMPLES = """
+
+ - name: Delete Key Manager
+ tags:
+ - delete
+ na_ontap_security_key_manager:
+ state: absent
+ node: swenjun-vsim1
+ hostname: "{{ hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ ip_address: 0.0.0.0
+
+ - name: Add Key Manager
+ tags:
+ - add
+ na_ontap_security_key_manager:
+ state: present
+ node: swenjun-vsim1
+ hostname: "{{ hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ ip_address: 0.0.0.0
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSecurityKeyManager(object):
+    '''class with key manager operations'''
+
+    def __init__(self):
+        '''Initialize module parameters and open a cluster-scoped ZAPI connection'''
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            ip_address=dict(required=True, type='str'),
+            node=dict(required=False, type='str'),
+            tcp_port=dict(required=False, type='int', default=5696)
+        )
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(
+                msg="the python NetApp-Lib module is required"
+            )
+        else:
+            # no vserver argument: the connection targets the cluster admin vserver
+            self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_key_manager(self):
+        """
+        get key manager by ip address.
+        :return: a dict of key manager attributes (ip_address, server_status,
+                 tcp_port, node), or None if no record matches
+        """
+        key_manager_info = netapp_utils.zapi.NaElement('security-key-manager-get-iter')
+        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+            'key-manager-info', **{'key-manager-ip-address': self.parameters['ip_address']})
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(query_details)
+        key_manager_info.add_child_elem(query)
+
+        try:
+            result = self.cluster.invoke_successfully(key_manager_info, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            # NOTE(review): the message reports 'node', which is an optional
+            # parameter and may be None here — confirm intended wording
+            self.module.fail_json(msg='Error fetching key manager %s : %s'
+                                      % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+        return_value = None
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            key_manager = result.get_child_by_name('attributes-list').get_child_by_name('key-manager-info')
+            return_value = {}
+            # copy only the attributes actually present in the ZAPI reply
+            if key_manager.get_child_by_name('key-manager-ip-address'):
+                return_value['ip_address'] = key_manager.get_child_content('key-manager-ip-address')
+            if key_manager.get_child_by_name('key-manager-server-status'):
+                return_value['server_status'] = key_manager.get_child_content('key-manager-server-status')
+            if key_manager.get_child_by_name('key-manager-tcp-port'):
+                return_value['tcp_port'] = key_manager.get_child_content('key-manager-tcp-port')
+            if key_manager.get_child_by_name('node-name'):
+                return_value['node'] = key_manager.get_child_content('node-name')
+
+        return return_value
+
+    def key_manager_setup(self):
+        """
+        set up external key manager.
+        """
+        key_manager_setup = netapp_utils.zapi.NaElement('security-key-manager-setup')
+        # if an on-boarding passphrase is specified, it is on-boarding key management.
+        # If not, then it's external key management.
+        try:
+            self.cluster.invoke_successfully(key_manager_setup, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error setting up key manager %s : %s'
+                                      % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def create_key_manager(self):
+        """
+        add key manager.
+        """
+        key_manager_create = netapp_utils.zapi.NaElement('security-key-manager-add')
+        key_manager_create.add_new_child('key-manager-ip-address', self.parameters['ip_address'])
+        if self.parameters.get('tcp_port'):
+            # ZAPI children are strings; tcp_port is declared as an int
+            key_manager_create.add_new_child('key-manager-tcp-port', str(self.parameters['tcp_port']))
+        try:
+            self.cluster.invoke_successfully(key_manager_create, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating key manager %s : %s'
+                                      % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_key_manager(self):
+        """
+        delete key manager.
+        """
+        key_manager_delete = netapp_utils.zapi.NaElement('security-key-manager-delete')
+        key_manager_delete.add_new_child('key-manager-ip-address', self.parameters['ip_address'])
+        try:
+            self.cluster.invoke_successfully(key_manager_delete, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting key manager %s : %s'
+                                      % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        '''Create or delete the key manager as needed; exits via exit_json'''
+        self.asup_log_for_cserver("na_ontap_security_key_manager")
+        # setup is invoked on every run, before querying the current state
+        self.key_manager_setup()
+        current = self.get_key_manager()
+        cd_action = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                # check mode: report the change without performing it
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_key_manager()
+                elif cd_action == 'delete':
+                    self.delete_key_manager()
+        self.module.exit_json(changed=self.na_helper.changed)
+
+    def asup_log_for_cserver(self, event_name):
+        """
+        Fetch admin vserver for the given cluster
+        Create and Autosupport log event with the given module name
+        :param event_name: Name of the event log
+        :return: None
+        """
+        results = netapp_utils.get_cserver(self.cluster)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ '''Apply volume operations from playbook'''
+ obj = NetAppOntapSecurityKeyManager()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py
new file mode 100644
index 00000000..63de30cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_service_processor_network
+short_description: NetApp ONTAP service processor network
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify a ONTAP service processor network
+options:
+ state:
+ description:
+ - Whether the specified service processor network should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+ address_type:
+ description:
+ - Specify address class.
+ required: true
+ type: str
+ choices: ['ipv4', 'ipv6']
+ is_enabled:
+ description:
+ - Specify whether to enable or disable the service processor network.
+ required: true
+ type: bool
+ node:
+ description:
+ - The node where the service processor network should be enabled
+ required: true
+ type: str
+ dhcp:
+ description:
+ - Specify dhcp type.
+ type: str
+ choices: ['v4', 'none']
+ gateway_ip_address:
+ description:
+ - Specify the gateway ip.
+ type: str
+ ip_address:
+ description:
+ - Specify the service processor ip address.
+ type: str
+ netmask:
+ description:
+ - Specify the service processor netmask.
+ type: str
+ prefix_length:
+ description:
+ - Specify the service processor prefix_length.
+ type: int
+ wait_for_completion:
+ description:
+ - Set this parameter to 'true' for synchronous execution (wait until SP status is successfully updated)
+ - Set this parameter to 'false' for asynchronous execution
+ - For asynchronous, execution exits as soon as the request is sent, without checking SP status
+ type: bool
+ default: false
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+ - name: Modify Service Processor Network
+ na_ontap_service_processor_network:
+ state: present
+ address_type: ipv4
+ is_enabled: true
+ dhcp: v4
+ node: "{{ netapp_node }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import time
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapServiceProcessorNetwork(object):
+    """
+    Modify a Service Processor Network
+    """
+
+    def __init__(self):
+        """
+        Initialize the NetAppOntapServiceProcessorNetwork class
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present'], default='present'),
+            address_type=dict(required=True, type='str', choices=['ipv4', 'ipv6']),
+            is_enabled=dict(required=True, type='bool'),
+            node=dict(required=True, type='str'),
+            dhcp=dict(required=False, type='str', choices=['v4', 'none']),
+            gateway_ip_address=dict(required=False, type='str'),
+            ip_address=dict(required=False, type='str'),
+            netmask=dict(required=False, type='str'),
+            prefix_length=dict(required=False, type='int'),
+            wait_for_completion=dict(required=False, type='bool', default=False)
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.set_playbook_zapi_key_map()
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(
+                msg="the python NetApp-Lib module is required")
+        else:
+            # vserver=None: service processors are node-level, cluster-scoped objects
+            self.server = netapp_utils.setup_na_ontap_zapi(
+                module=self.module, vserver=None)
+        return
+
+    def set_playbook_zapi_key_map(self):
+        """Build the playbook-option -> ZAPI-field maps used to translate get/modify calls."""
+        self.na_helper.zapi_string_keys = {
+            'address_type': 'address-type',
+            'node': 'node',
+            'dhcp': 'dhcp',
+            'gateway_ip_address': 'gateway-ip-address',
+            'ip_address': 'ip-address',
+            'netmask': 'netmask'
+        }
+        self.na_helper.zapi_int_keys = {
+            'prefix_length': 'prefix-length'
+        }
+        self.na_helper.zapi_bool_keys = {
+            'is_enabled': 'is-enabled',
+        }
+        self.na_helper.zapi_required = {
+            'address_type': 'address-type',
+            'node': 'node',
+            'is_enabled': 'is-enabled'
+        }
+
+    def get_sp_network_status(self):
+        """
+        Return the setup status of the service processor network for the
+        configured node and address_type.
+        :return: setup-status string (e.g. 'in_progress'), or None if no record found
+        :rtype: str or None
+        """
+        spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter')
+        query_info = {
+            'query': {
+                'service-processor-network-info': {
+                    'node': self.parameters['node'],
+                    'address-type': self.parameters['address_type']
+                }
+            }
+        }
+        spn_get_iter.translate_struct(query_info)
+        result = self.server.invoke_successfully(spn_get_iter, True)
+        if int(result['num-records']) >= 1:
+            sp_attr_info = result['attributes-list']['service-processor-network-info']
+            return sp_attr_info.get_child_content('setup-status')
+        return None
+
+    def get_service_processor_network(self):
+        """
+        Return details about service processor network
+        for the node given in the module parameters.
+        :return: Details about service processor network. None if not found.
+        :rtype: dict
+        """
+        spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter')
+        query_info = {
+            'query': {
+                'service-processor-network-info': {
+                    'node': self.parameters['node']
+                }
+            }
+        }
+        spn_get_iter.translate_struct(query_info)
+        result = self.server.invoke_successfully(spn_get_iter, True)
+        sp_details = None
+        # check whether a matching record exists before reading attributes
+        if int(result['num-records']) >= 1:
+            sp_details = dict()
+            sp_attr_info = result['attributes-list']['service-processor-network-info']
+            # translate each ZAPI field back to its playbook option, converting types
+            for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+                sp_details[item_key] = sp_attr_info.get_child_content(zapi_key)
+            for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+                sp_details[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+                                                                         value=sp_attr_info.get_child_content(zapi_key))
+            for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+                sp_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+                                                                        value=sp_attr_info.get_child_content(zapi_key))
+        return sp_details
+
+    def modify_service_processor_network(self, params=None):
+        """
+        Modify a service processor network.
+        :param params: A dict of modified options.
+        When dhcp is not set to v4, ip_address, netmask, and gateway_ip_address must be specified even if remains the same.
+        """
+        # guard: when disabling, no other attribute may be modified at the same time
+        if self.parameters['is_enabled'] is False:
+            if params.get('is_enabled') and len(params) > 1:
+                self.module.fail_json(msg='Error: Cannot modify any other parameter for a service processor network if option "is_enabled" is set to false.')
+            elif params.get('is_enabled') is None and len(params) > 0:
+                self.module.fail_json(msg='Error: Cannot modify a service processor network if it is disabled.')
+
+        sp_modify = netapp_utils.zapi.NaElement('service-processor-network-modify')
+        sp_modify.add_new_child("node", self.parameters['node'])
+        sp_modify.add_new_child("address-type", self.parameters['address_type'])
+        sp_attributes = dict()
+        # translate every supplied playbook option into its ZAPI field
+        for item_key in self.parameters:
+            if item_key in self.na_helper.zapi_string_keys:
+                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
+                sp_attributes[zapi_key] = self.parameters[item_key]
+            elif item_key in self.na_helper.zapi_bool_keys:
+                zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
+                sp_attributes[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters[item_key])
+            elif item_key in self.na_helper.zapi_int_keys:
+                zapi_key = self.na_helper.zapi_int_keys.get(item_key)
+                sp_attributes[zapi_key] = self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters[item_key])
+        sp_modify.translate_struct(sp_attributes)
+        try:
+            self.server.invoke_successfully(sp_modify, enable_tunneling=True)
+            if self.parameters.get('wait_for_completion'):
+                # poll setup status for up to ~100 seconds (10 retries x 10s)
+                retries = 10
+                while self.get_sp_network_status() == 'in_progress' and retries > 0:
+                    time.sleep(10)
+                    retries = retries - 1
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying service processor network: %s' % (to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        """Send an EMS autosupport log event for this module via the admin vserver."""
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event("na_ontap_service_processor_network", cserver)
+
+    def apply(self):
+        """
+        Run Module based on play book
+        """
+        self.autosupport_log()
+        current = self.get_service_processor_network()
+        # NOTE(review): modify is computed before the 'current is None' guard below;
+        # get_modified_attributes must tolerate current=None — confirm
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if not current:
+            self.module.fail_json(msg='Error No Service Processor for node: %s' % self.parameters['node'])
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                # check mode: report the change without performing it
+                pass
+            else:
+                self.modify_service_processor_network(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create the NetApp Ontap Service Processor Network Object and modify it
+ """
+
+ obj = NetAppOntapServiceProcessorNetwork()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py
new file mode 100644
index 00000000..00338142
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py
@@ -0,0 +1,895 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Update/Initialize/Break/Resync/Resume SnapMirror volume/vserver relationships for ONTAP/ONTAP
+ - Create/Delete/Update/Initialize SnapMirror volume relationship between ElementSW and ONTAP
+ - Modify schedule for a SnapMirror relationship for ONTAP/ONTAP and ElementSW/ONTAP
+ - Pre-requisite for ElementSW to ONTAP relationship or vice-versa is an established SnapMirror endpoint for ONTAP cluster with ElementSW UI
+ - Pre-requisite for ElementSW to ONTAP relationship or vice-versa is to have SnapMirror enabled in the ElementSW volume
+ - For creating a SnapMirror ElementSW/ONTAP relationship, an existing ONTAP/ElementSW relationship should be present
+ - Performs resync if the C(relationship_state=active) and the current mirror state of the snapmirror relationship is broken-off
+ - Performs resume if the C(relationship_state=active), the current snapmirror relationship status is quiesced and mirror state is snapmirrored
+ - Performs restore if the C(relationship_type=restore) and all other operations will not be performed during this task
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_snapmirror
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified relationship should exist or not.
+ default: present
+ type: str
+ source_volume:
+ description:
+ - Specifies the name of the source volume for the SnapMirror.
+ type: str
+ destination_volume:
+ description:
+ - Specifies the name of the destination volume for the SnapMirror.
+ type: str
+ source_vserver:
+ description:
+ - Name of the source vserver for the SnapMirror.
+ type: str
+ destination_vserver:
+ description:
+ - Name of the destination vserver for the SnapMirror.
+ type: str
+ source_path:
+ description:
+ - Specifies the source endpoint of the SnapMirror relationship.
+ - If the source is an ONTAP volume, format should be <[vserver:][volume]> or <[[cluster:]//vserver/]volume>
+ - If the source is an ElementSW volume, format should be <[Element_SVIP]:/lun/[Element_VOLUME_ID]>
+ - If the source is an ElementSW volume, the volume should have SnapMirror enabled.
+ type: str
+ destination_path:
+ description:
+ - Specifies the destination endpoint of the SnapMirror relationship.
+ type: str
+ relationship_type:
+ choices: ['data_protection', 'load_sharing', 'vault', 'restore', 'transition_data_protection',
+ 'extended_data_protection']
+ type: str
+ description:
+ - Specify the type of SnapMirror relationship.
+ - for 'restore' unless 'source_snapshot' is specified the most recent Snapshot copy on the source volume is restored.
+ - restore SnapMirror is not idempotent.
+ schedule:
+ description:
+ - Specify the name of the current schedule, which is used to update the SnapMirror relationship.
+ - Optional for create, modifiable.
+ type: str
+ policy:
+ description:
+ - Specify the name of the SnapMirror policy that applies to this relationship.
+ version_added: 2.8.0
+ type: str
+ source_hostname:
+ description:
+ - Source hostname or management IP address for ONTAP or ElementSW cluster.
+ - Required for SnapMirror delete
+ type: str
+ source_username:
+ description:
+ - Source username for ONTAP or ElementSW cluster.
+ - Optional if this is same as destination username.
+ type: str
+ source_password:
+ description:
+ - Source password for ONTAP or ElementSW cluster.
+ - Optional if this is same as destination password.
+ type: str
+ connection_type:
+ description:
+ - Type of SnapMirror relationship.
+ - Pre-requisite for either elementsw_ontap or ontap_elementsw the ElementSW volume should have enableSnapmirror option set to true.
+ - For using ontap_elementsw, elementsw_ontap snapmirror relationship should exist.
+ choices: ['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw']
+ default: ontap_ontap
+ type: str
+ version_added: 2.9.0
+ max_transfer_rate:
+ description:
+ - Specifies the upper bound, in kilobytes per second, at which data is transferred.
+ - Default is unlimited, it can be explicitly set to 0 as unlimited.
+ type: int
+ version_added: 2.9.0
+ initialize:
+ description:
+ - Specifies whether to initialize SnapMirror relation.
+ - Default is True, it can be explicitly set to False to avoid initializing SnapMirror relation.
+ default: true
+ type: bool
+ version_added: '19.11.0'
+ update:
+ description:
+ - Specifies whether to update the destination endpoint of the SnapMirror relationship only if the relationship is already present and active.
+ - Default is True.
+ default: true
+ type: bool
+ version_added: '20.2.0'
+ relationship_info_only:
+ description:
+ - If relationship-info-only is set to true then only relationship information is removed.
+ default: false
+ type: bool
+ version_added: '20.4.0'
+ relationship_state:
+ description:
+ - Specifies whether to break SnapMirror relation or establish a SnapMirror relationship.
+ - state must be present to use this option.
+ default: active
+ choices: ['active', 'broken']
+ type: str
+ version_added: '20.2.0'
+ source_snapshot:
+ description:
+ - Specifies the Snapshot from the source to be restored.
+ type: str
+ version_added: '20.6.0'
+ identity_preserve:
+ description:
+ - Specifies whether or not the identity of the source Vserver is replicated to the destination Vserver.
+ - If this parameter is set to true, the source Vserver's configuration will additionally be replicated to the destination.
+ - If the parameter is set to false, then only the source Vserver's volumes and RBAC configuration are replicated to the destination.
+ type: bool
+ version_added: 2.9.0
+short_description: "NetApp ONTAP or ElementSW Manage SnapMirror"
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ # creates and initializes the snapmirror
+ - name: Create ONTAP/ONTAP SnapMirror
+ na_ontap_snapmirror:
+ state: present
+ source_volume: test_src
+ destination_volume: test_dest
+ source_vserver: ansible_src
+ destination_vserver: ansible_dest
+ schedule: hourly
+ policy: MirrorAllSnapshots
+ max_transfer_rate: 1000
+ initialize: False
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ # creates and initializes the snapmirror between vservers
+ - name: Create ONTAP/ONTAP vserver SnapMirror
+ na_ontap_snapmirror:
+ state: present
+ source_vserver: ansible_src
+ destination_vserver: ansible_dest
+ identity_preserve: true
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+  # existing snapmirror relation with status 'snapmirrored' will be initialized
+  - name: Initialize ONTAP/ONTAP SnapMirror
+ na_ontap_snapmirror:
+ state: present
+ source_path: 'ansible:test'
+ destination_path: 'ansible:dest'
+ relationship_state: active
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Delete SnapMirror
+ na_ontap_snapmirror:
+ state: absent
+ destination_path: <path>
+ relationship_info_only: True
+ source_hostname: "{{ source_hostname }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Break Snapmirror
+ na_ontap_snapmirror:
+ state: present
+ relationship_state: broken
+ destination_path: <path>
+ source_hostname: "{{ source_hostname }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Restore Snapmirror volume using location (Idempotency)
+ na_ontap_snapmirror:
+ state: present
+ source_path: <path>
+ destination_path: <path>
+ relationship_type: restore
+ source_snapshot: "{{ snapshot }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Set schedule to NULL
+ na_ontap_snapmirror:
+ state: present
+ destination_path: <path>
+ schedule: ""
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Create SnapMirror from ElementSW to ONTAP
+ na_ontap_snapmirror:
+ state: present
+ connection_type: elementsw_ontap
+ source_path: '10.10.10.10:/lun/300'
+ destination_path: 'ansible_test:ansible_dest_vol'
+ schedule: hourly
+ policy: MirrorLatest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ source_hostname: " {{ Element_cluster_mvip }}"
+ source_username: "{{ Element_cluster_username }}"
+ source_password: "{{ Element_cluster_password }}"
+
+ - name: Create SnapMirror from ONTAP to ElementSW
+ na_ontap_snapmirror:
+ state: present
+ connection_type: ontap_elementsw
+ destination_path: '10.10.10.10:/lun/300'
+ source_path: 'ansible_test:ansible_dest_vol'
+ policy: MirrorLatest
+ hostname: "{{ Element_cluster_mvip }}"
+ username: "{{ Element_cluster_username }}"
+ password: "{{ Element_cluster_password }}"
+ source_hostname: " {{ netapp_hostname }}"
+ source_username: "{{ netapp_username }}"
+ source_password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import re
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class NetAppONTAPSnapmirror(object):
+ """
+ Class with Snapmirror methods
+ """
+
+    def __init__(self):
+        """Parse module arguments and connect to the cluster that controls the relationship."""
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            source_vserver=dict(required=False, type='str'),
+            destination_vserver=dict(required=False, type='str'),
+            source_volume=dict(required=False, type='str'),
+            destination_volume=dict(required=False, type='str'),
+            source_path=dict(required=False, type='str'),
+            destination_path=dict(required=False, type='str'),
+            schedule=dict(required=False, type='str'),
+            policy=dict(required=False, type='str'),
+            relationship_type=dict(required=False, type='str',
+                                   choices=['data_protection', 'load_sharing',
+                                            'vault', 'restore',
+                                            'transition_data_protection',
+                                            'extended_data_protection']
+                                   ),
+            source_hostname=dict(required=False, type='str'),
+            connection_type=dict(required=False, type='str',
+                                 choices=['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw'],
+                                 default='ontap_ontap'),
+            source_username=dict(required=False, type='str'),
+            source_password=dict(required=False, type='str', no_log=True),
+            max_transfer_rate=dict(required=False, type='int'),
+            initialize=dict(required=False, type='bool', default=True),
+            update=dict(required=False, type='bool', default=True),
+            identity_preserve=dict(required=False, type='bool'),
+            relationship_state=dict(required=False, type='str', choices=['active', 'broken'], default='active'),
+            relationship_info_only=dict(required=False, type='bool', default=False),
+            source_snapshot=dict(required=False, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_together=(['source_volume', 'destination_volume'],
+                               ['source_vserver', 'destination_vserver']),
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # setup later if required
+        self.source_server = None
+        # only for ElementSW -> ONTAP snapmirroring, validate if ElementSW SDK is available
+        if self.parameters.get('connection_type') in ['elementsw_ontap', 'ontap_elementsw']:
+            if HAS_SF_SDK is False:
+                self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        if self.parameters.get('connection_type') != 'ontap_elementsw':
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        else:
+            # for ontap_elementsw, the ZAPI connection must target the ONTAP
+            # source side, so swap the source credentials into module.params
+            if self.parameters.get('source_username'):
+                self.module.params['username'] = self.parameters['source_username']
+            if self.parameters.get('source_password'):
+                self.module.params['password'] = self.parameters['source_password']
+            self.module.params['hostname'] = self.parameters['source_hostname']
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def set_element_connection(self, kind):
+ if kind == 'source':
+ self.module.params['hostname'] = self.parameters['source_hostname']
+ self.module.params['username'] = self.parameters['source_username']
+ self.module.params['password'] = self.parameters['source_password']
+ elif kind == 'destination':
+ self.module.params['hostname'] = self.parameters['hostname']
+ self.module.params['username'] = self.parameters['username']
+ self.module.params['password'] = self.parameters['password']
+ elem = netapp_utils.create_sf_connection(module=self.module)
+ elementsw_helper = NaElementSWModule(elem)
+ return elementsw_helper, elem
+
+ def snapmirror_get_iter(self, destination=None):
+ """
+ Compose NaElement object to query current SnapMirror relations using destination-path
+ SnapMirror relation for a destination path is unique
+ :return: NaElement object for SnapMirror-get-iter
+ """
+ snapmirror_get_iter = netapp_utils.zapi.NaElement('snapmirror-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ snapmirror_info = netapp_utils.zapi.NaElement('snapmirror-info')
+ if destination is None:
+ destination = self.parameters['destination_path']
+ snapmirror_info.add_new_child('destination-location', destination)
+ query.add_child_elem(snapmirror_info)
+ snapmirror_get_iter.add_child_elem(query)
+ return snapmirror_get_iter
+
+ def snapmirror_get(self, destination=None):
+ """
+ Get current SnapMirror relations
+ :return: Dictionary of current SnapMirror details if query successful, else None
+ """
+ snapmirror_get_iter = self.snapmirror_get_iter(destination)
+ snap_info = dict()
+ try:
+ result = self.server.invoke_successfully(snapmirror_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snapmirror info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ snapmirror_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'snapmirror-info')
+ snap_info['mirror_state'] = snapmirror_info.get_child_content('mirror-state')
+ snap_info['status'] = snapmirror_info.get_child_content('relationship-status')
+ snap_info['schedule'] = snapmirror_info.get_child_content('schedule')
+ snap_info['policy'] = snapmirror_info.get_child_content('policy')
+ snap_info['relationship'] = snapmirror_info.get_child_content('relationship-type')
+ if snapmirror_info.get_child_by_name('max-transfer-rate'):
+ snap_info['max_transfer_rate'] = int(snapmirror_info.get_child_content('max-transfer-rate'))
+ if snap_info['schedule'] is None:
+ snap_info['schedule'] = ""
+ return snap_info
+ return None
+
+ def check_if_remote_volume_exists(self):
+ """
+ Validate existence of source volume
+ :return: True if volume exists, False otherwise
+ """
+ self.set_source_cluster_connection()
+ # do a get volume to check if volume exists or not
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', self.parameters['source_volume'])
+ # if source_volume is present, then source_vserver is also guaranteed to be present
+ volume_id_attributes.add_new_child('vserver-name', self.parameters['source_vserver'])
+ volume_attributes.add_child_elem(volume_id_attributes)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+ volume_info.add_child_elem(query)
+ try:
+ result = self.source_server.invoke_successfully(volume_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching source volume details %s : %s'
+ % (self.parameters['source_volume'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ return True
+ return False
+
+ def snapmirror_create(self):
+ """
+ Create a SnapMirror relationship
+ """
+ if self.parameters.get('source_hostname') and self.parameters.get('source_volume'):
+ if not self.check_if_remote_volume_exists():
+ self.module.fail_json(msg='Source volume does not exist. Please specify a volume that exists')
+ options = {'source-location': self.parameters['source_path'],
+ 'destination-location': self.parameters['destination_path']}
+ snapmirror_create = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-create', **options)
+ if self.parameters.get('relationship_type'):
+ snapmirror_create.add_new_child('relationship-type', self.parameters['relationship_type'])
+ if self.parameters.get('schedule'):
+ snapmirror_create.add_new_child('schedule', self.parameters['schedule'])
+ if self.parameters.get('policy'):
+ snapmirror_create.add_new_child('policy', self.parameters['policy'])
+ if self.parameters.get('max_transfer_rate'):
+ snapmirror_create.add_new_child('max-transfer-rate', str(self.parameters['max_transfer_rate']))
+ if self.parameters.get('identity_preserve'):
+ snapmirror_create.add_new_child('identity-preserve', str(self.parameters['identity_preserve']))
+ try:
+ self.server.invoke_successfully(snapmirror_create, enable_tunneling=True)
+ if self.parameters['initialize']:
+ self.snapmirror_initialize()
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating SnapMirror %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ def set_source_cluster_connection(self):
+ """
+ Setup ontap ZAPI server connection for source hostname
+ :return: None
+ """
+ if self.parameters.get('source_username'):
+ self.module.params['username'] = self.parameters['source_username']
+ if self.parameters.get('source_password'):
+ self.module.params['password'] = self.parameters['source_password']
+ self.module.params['hostname'] = self.parameters['source_hostname']
+ self.source_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
    def delete_snapmirror(self, is_hci, relationship_type, mirror_state):
        """
        Delete a SnapMirror relationship
        #1. Quiesce the SnapMirror relationship at destination
        #2. Break the SnapMirror relationship at the destination
        #3. Release the SnapMirror at source
        #4. Delete SnapMirror at destination

        :param is_hci: True when the source is an ElementSW (HCI) cluster, in
            which case there is no ONTAP source cluster to release from
        :param relationship_type: current relationship type from snapmirror_get()
        :param mirror_state: current mirror state from snapmirror_get()
        """
        if not is_hci:
            # releasing at the source requires a connection to the source cluster
            if not self.parameters.get('source_hostname'):
                self.module.fail_json(msg='Missing parameters for delete: Please specify the '
                                          'source cluster hostname to release the SnapMirror relationship')
        # Quiesce and Break at destination
        # load_sharing/vault relationships cannot be broken, and there is nothing
        # to break when the mirror is uninitialized or already broken-off
        if relationship_type not in ['load_sharing', 'vault'] and mirror_state not in ['uninitialized', 'broken-off']:
            self.snapmirror_break()
        # if source is ONTAP, release the destination at source cluster
        if not is_hci:
            self.set_source_cluster_connection()
            if self.get_destination():
                # Release at source
                self.snapmirror_release()
        # Delete at destination
        self.snapmirror_delete()
+
    def snapmirror_quiesce(self):
        """
        Quiesce SnapMirror relationship - disable all future transfers to this destination

        Issues snapmirror-quiesce; if the call does not report 'passed'
        immediately, polls the relationship status every 5 seconds (up to 5
        retries, ~25s total) until it becomes 'quiesced', failing the module
        if it never does.
        """
        result = None
        options = {'destination-location': self.parameters['destination_path']}

        snapmirror_quiesce = netapp_utils.zapi.NaElement.create_node_with_children(
            'snapmirror-quiesce', **options)
        try:
            result = self.server.invoke_successfully(snapmirror_quiesce, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error Quiescing SnapMirror : %s'
                                  % (to_native(error)), exception=traceback.format_exc())
        # checking if quiesce was passed successfully
        # NOTE(review): result['status'] indexes the NaElement reply directly -
        # presumably this reads the operation status attribute; confirm against NaElement API
        if result is not None and result['status'] == 'passed':
            return
        elif result is not None and result['status'] != 'passed':
            retries = 5
            while retries > 0:
                time.sleep(5)
                retries = retries - 1
                # NOTE(review): snapmirror_get() can return None if the relationship
                # disappears while polling, which would raise TypeError here - confirm
                status = self.snapmirror_get()
                if status['status'] == 'quiesced':
                    return
                if retries == 0:
                    self.module.fail_json(msg='Taking a long time to Quiescing SnapMirror, try again later')
+
+ def snapmirror_delete(self):
+ """
+ Delete SnapMirror relationship at destination cluster
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+
+ snapmirror_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-destroy', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_break(self, destination=None):
+ """
+ Break SnapMirror relationship at destination cluster
+ #1. Quiesce the SnapMirror relationship at destination
+ #2. Break the SnapMirror relationship at the destination
+ """
+ self.snapmirror_quiesce()
+ if destination is None:
+ destination = self.parameters['destination_path']
+ options = {'destination-location': destination}
+ snapmirror_break = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-break', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_break,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error breaking SnapMirror relationship : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_release(self):
+ """
+ Release SnapMirror relationship from source cluster
+ """
+ options = {'destination-location': self.parameters['destination_path'],
+ 'relationship-info-only': self.na_helper.get_value_for_bool(False, self.parameters['relationship_info_only'])}
+ snapmirror_release = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-release', **options)
+ try:
+ self.source_server.invoke_successfully(snapmirror_release,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error releasing SnapMirror relationship : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_abort(self):
+ """
+ Abort a SnapMirror relationship in progress
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_abort = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-abort', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_abort,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error aborting SnapMirror relationship : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
    def snapmirror_initialize(self):
        """
        Initialize SnapMirror based on relationship state

        Runs snapmirror-initialize (or snapmirror-initialize-ls-set for
        load_sharing relationships, keyed by source location) only when the
        relationship is not already 'snapmirrored'.
        """
        # NOTE(review): snapmirror_get() returns None when no relationship exists,
        # which would raise TypeError below - confirm callers guarantee existence
        current = self.snapmirror_get()
        if current['mirror_state'] != 'snapmirrored':
            initialize_zapi = 'snapmirror-initialize'
            if self.parameters.get('relationship_type') and self.parameters['relationship_type'] == 'load_sharing':
                # load-sharing sets are initialized as a group from the source side
                initialize_zapi = 'snapmirror-initialize-ls-set'
                options = {'source-location': self.parameters['source_path']}
            else:
                options = {'destination-location': self.parameters['destination_path']}
            snapmirror_init = netapp_utils.zapi.NaElement.create_node_with_children(
                initialize_zapi, **options)
            try:
                self.server.invoke_successfully(snapmirror_init,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error initializing SnapMirror : %s'
                                          % (to_native(error)),
                                      exception=traceback.format_exc())
+
+ def snapmirror_resync(self):
+ """
+ resync SnapMirror based on relationship state
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_resync = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resync', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_resync, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error resyncing SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_resume(self):
+ """
+ resume SnapMirror based on relationship state
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_resume = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resume', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_resume, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error resume SnapMirror : %s' % (to_native(error)), exception=traceback.format_exc())
+
+ def snapmirror_restore(self):
+ """
+ restore SnapMirror based on relationship state
+ """
+ options = {'destination-location': self.parameters['destination_path'],
+ 'source-location': self.parameters['source_path']}
+ if self.parameters.get('source_snapshot'):
+ options['source-snapshot'] = self.parameters['source_snapshot']
+ snapmirror_restore = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-restore', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_restore, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error restore SnapMirror : %s' % (to_native(error)), exception=traceback.format_exc())
+
+ def snapmirror_modify(self, modify):
+ """
+ Modify SnapMirror schedule or policy
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-modify', **options)
+ if modify.get('schedule') is not None:
+ snapmirror_modify.add_new_child('schedule', modify.get('schedule'))
+ if modify.get('policy'):
+ snapmirror_modify.add_new_child('policy', modify.get('policy'))
+ if modify.get('max_transfer_rate'):
+ snapmirror_modify.add_new_child('max-transfer-rate', str(modify.get('max_transfer_rate')))
+ try:
+ self.server.invoke_successfully(snapmirror_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying SnapMirror schedule or policy : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_update(self):
+ """
+ Update data in destination endpoint
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_update = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-update', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_update, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def check_parameters(self):
+ """
+ Validate parameters and fail if one or more required params are missing
+ Update source and destination path from vserver and volume parameters
+ """
+ if self.parameters['state'] == 'present'\
+ and (self.parameters.get('source_path') or self.parameters.get('destination_path')):
+ if not self.parameters.get('destination_path') or not self.parameters.get('source_path'):
+ self.module.fail_json(msg='Missing parameters: Source path or Destination path')
+ elif self.parameters.get('source_volume'):
+ if not self.parameters.get('source_vserver') or not self.parameters.get('destination_vserver'):
+ self.module.fail_json(msg='Missing parameters: source vserver or destination vserver or both')
+ self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['source_volume']
+ self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" +\
+ self.parameters['destination_volume']
+ elif self.parameters.get('source_vserver'):
+ self.parameters['source_path'] = self.parameters['source_vserver'] + ":"
+ self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":"
+
+ def get_destination(self):
+ result = None
+ release_get = netapp_utils.zapi.NaElement('snapmirror-get-destination-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ snapmirror_dest_info = netapp_utils.zapi.NaElement('snapmirror-destination-info')
+ snapmirror_dest_info.add_new_child('destination-location', self.parameters['destination_path'])
+ query.add_child_elem(snapmirror_dest_info)
+ release_get.add_child_elem(query)
+ try:
+ result = self.source_server.invoke_successfully(release_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snapmirror destinations info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ return True
+ return None
+
+ @staticmethod
+ def element_source_path_format_matches(value):
+ return re.match(pattern=r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\/lun\/[0-9]+",
+ string=value)
+
+ def check_elementsw_parameters(self, kind='source'):
+ """
+ Validate all ElementSW cluster parameters required for managing the SnapMirror relationship
+ Validate if both source and destination paths are present
+ Validate if source_path follows the required format
+ Validate SVIP
+ Validate if ElementSW volume exists
+ :return: None
+ """
+ path = None
+ if kind == 'destination':
+ path = self.parameters.get('destination_path')
+ elif kind == 'source':
+ path = self.parameters.get('source_path')
+ if path is None:
+ self.module.fail_json(msg="Error: Missing required parameter %s_path for "
+ "connection_type %s" % (kind, self.parameters['connection_type']))
+ else:
+ if NetAppONTAPSnapmirror.element_source_path_format_matches(path) is None:
+ self.module.fail_json(msg="Error: invalid %s_path %s. "
+ "If the path is a ElementSW cluster, the value should be of the format"
+ " <Element_SVIP>:/lun/<Element_VOLUME_ID>" % (kind, path))
+ # validate source_path
+ elementsw_helper, elem = self.set_element_connection(kind)
+ self.validate_elementsw_svip(path, elem)
+ self.check_if_elementsw_volume_exists(path, elementsw_helper)
+
+ def validate_elementsw_svip(self, path, elem):
+ """
+ Validate ElementSW cluster SVIP
+ :return: None
+ """
+ result = None
+ try:
+ result = elem.get_cluster_info()
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error fetching SVIP", exception=to_native(err))
+ if result and result.cluster_info.svip:
+ cluster_svip = result.cluster_info.svip
+ svip = path.split(':')[0] # split IP address from source_path
+ if svip != cluster_svip:
+ self.module.fail_json(msg="Error: Invalid SVIP")
+
+ def check_if_elementsw_volume_exists(self, path, elementsw_helper):
+ """
+ Check if remote ElementSW volume exists
+ :return: None
+ """
+ volume_id, vol_id = None, path.split('/')[-1]
+ try:
+ volume_id = elementsw_helper.volume_id_exists(int(vol_id))
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error fetching Volume details", exception=to_native(err))
+
+ if volume_id is None:
+ self.module.fail_json(msg="Error: Source volume does not exist in the ElementSW cluster")
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ if results is None:
+ # We may be running on a vserser
+ try:
+ netapp_utils.ems_log_event(event_name, self.server)
+ except netapp_utils.zapi.NaApiError:
+ # Don't fail if we cannot log usage
+ pass
+ else:
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
    def apply(self):
        """
        Apply action to SnapMirror

        Dispatches on state / connection_type / relationship_state to create,
        delete, modify, break, initialize, resume, resync, restore or update
        the relationship, honoring check_mode throughout.
        """
        self.asup_log_for_cserver("na_ontap_snapmirror")
        # source is ElementSW
        if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'elementsw_ontap':
            self.check_elementsw_parameters()
        elif self.parameters.get('connection_type') == 'ontap_elementsw':
            self.check_elementsw_parameters('destination')
        else:
            self.check_parameters()
        if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'ontap_elementsw':
            # ONTAP -> ElementSW only works on top of an existing ElementSW -> ONTAP relation
            current_elementsw_ontap = self.snapmirror_get(self.parameters['source_path'])
            if current_elementsw_ontap is None:
                self.module.fail_json(msg='Error: creating an ONTAP to ElementSW snapmirror relationship requires an '
                                          'established SnapMirror relation from ElementSW to ONTAP cluster')
        # 'restore' relationships are not compared against current state: always run the restore
        restore = self.parameters.get('relationship_type', '') == 'restore'
        current = self.snapmirror_get() if not restore else None
        cd_action = self.na_helper.get_cd_action(current, self.parameters) if not restore else None
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if not restore else None
        element_snapmirror = False
        if self.parameters['state'] == 'present' and restore:
            self.na_helper.changed = True
            if not self.module.check_mode:
                self.snapmirror_restore()
        elif cd_action == 'create':
            if not self.module.check_mode:
                self.snapmirror_create()
        elif cd_action == 'delete':
            if not self.module.check_mode:
                # an in-flight transfer is aborted rather than deleted
                if current['status'] == 'transferring':
                    self.snapmirror_abort()
                else:
                    if self.parameters.get('connection_type') == 'elementsw_ontap':
                        element_snapmirror = True
                    self.delete_snapmirror(element_snapmirror, current['relationship'], current['mirror_state'])
        else:
            if modify:
                if not self.module.check_mode:
                    self.snapmirror_modify(modify)
        # break relationship when 'relationship_state' == 'broken'
        if current and self.parameters['state'] == 'present' and self.parameters['relationship_state'] == 'broken':
            if current['mirror_state'] == 'uninitialized':
                self.module.fail_json(msg='SnapMirror relationship cannot be broken if mirror state is uninitialized')
            elif current['relationship'] in ['load_sharing', 'vault']:
                self.module.fail_json(msg='SnapMirror break is not allowed in a load_sharing or vault relationship')
            elif current['mirror_state'] != 'broken-off':
                if not self.module.check_mode:
                    self.snapmirror_break()
                self.na_helper.changed = True
        # check for initialize
        elif current and self.parameters['initialize'] and self.parameters['relationship_state'] == 'active'\
                and current['mirror_state'] == 'uninitialized':
            if not self.module.check_mode:
                self.snapmirror_initialize()
            # set changed explicitly for initialize
            self.na_helper.changed = True
        if self.parameters['state'] == 'present' and self.parameters['relationship_state'] == 'active':
            # resume when state is quiesced
            # NOTE(review): current may still be None here (e.g. right after the
            # create branch, or for restore), which would raise TypeError - confirm
            if current['status'] == 'quiesced':
                if not self.module.check_mode:
                    self.snapmirror_resume()
                # set changed explicitly for resume
                self.na_helper.changed = True
            # resync when state is broken-off
            if current['mirror_state'] == 'broken-off':
                if not self.module.check_mode:
                    self.snapmirror_resync()
                # set changed explicitly for resync
                self.na_helper.changed = True
            # Update when create is called again, or modify is being called
            elif self.parameters['update']:
                current = self.snapmirror_get()
                if current['mirror_state'] == 'snapmirrored':
                    if not self.module.check_mode:
                        self.snapmirror_update()
                self.na_helper.changed = True
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Instantiate the module object and run the requested action."""
    snapmirror = NetAppONTAPSnapmirror()
    snapmirror.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py
new file mode 100644
index 00000000..94f3aeaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py
@@ -0,0 +1,837 @@
+#!/usr/bin/python
+
+# (c) 2019-2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_snapmirror_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_snapmirror_policy
+short_description: NetApp ONTAP create, delete or modify SnapMirror policies
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- NetApp ONTAP create, modify, or destroy the SnapMirror policy
+- Add, modify and remove SnapMirror policy rules
+- Following parameters are not supported in REST; 'owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime', 'common_snapshot_schedule'
+options:
+ state:
+ description:
+ - Whether the specified SnapMirror policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ description:
+ - Specifies the vserver for the SnapMirror policy.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Specifies the SnapMirror policy name.
+ required: true
+ type: str
+ policy_type:
+ description:
+ - Specifies the SnapMirror policy type. Modifying the type of an existing SnapMirror policy is not supported
+ choices: ['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror']
+ type: str
+ comment:
+ description:
+ - Specifies the SnapMirror policy comment.
+ type: str
+ tries:
+ description:
+ - Specifies the number of tries.
+ type: str
+ transfer_priority:
+ description:
+ - Specifies the priority at which a SnapMirror transfer runs.
+ choices: ['low', 'normal']
+ type: str
+ common_snapshot_schedule:
+ description:
+ - Specifies the common Snapshot copy schedule associated with the policy, only required for strict_sync_mirror and sync_mirror.
+ type: str
+ owner:
+ description:
+ - Specifies the owner of the SnapMirror policy.
+ choices: ['cluster_admin', 'vserver_admin']
+ type: str
+ is_network_compression_enabled:
+ description:
+ - Specifies whether network compression is enabled for transfers.
+ type: bool
+ ignore_atime:
+ description:
+ - Specifies whether incremental transfers will ignore files which have only their access time changed. Applies to SnapMirror vault relationships only.
+ type: bool
+ restart:
+ description:
+ - Defines the behavior of SnapMirror if an interrupted transfer exists, applies to data protection only.
+ choices: ['always', 'never', 'default']
+ type: str
+ snapmirror_label:
+ description:
+ - SnapMirror policy rule label.
+ - Required when defining policy rules.
+ - Use an empty list to remove all user-defined rules.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ keep:
+ description:
+ - SnapMirror policy rule retention count for snapshots created.
+ - Required when defining policy rules.
+ type: list
+ elements: int
+ version_added: '20.7.0'
+ prefix:
+ description:
+ - SnapMirror policy rule prefix.
+ - Optional when defining policy rules.
+ - Set to '' to not set or remove an existing custom prefix.
+ - Prefix name should be unique within the policy.
+ - When specifying a custom prefix, schedule must also be specified.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ schedule:
+ description:
+ - SnapMirror policy rule schedule.
+ - Optional when defining policy rules.
+ - Set to '' to not set or remove a schedule.
+ - When specifying a schedule a custom prefix can be set otherwise the prefix will be set to snapmirror_label.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+
+"""
+
+EXAMPLES = """
+ - name: Create SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ comment: "created by ansible"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "async_mirror"
+ transfer_priority: "low"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror policy with basic rules
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "async_mirror"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror policy with rules and schedules (no schedule for daily rule)
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ schedule: ['','weekly','monthly']
+ prefix: ['','','monthly_mv']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy with rules, remove existing schedules and prefixes
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ schedule: ['','','']
+ prefix: ['','','']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy, delete all rules (excludes builtin rules)
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: []
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Delete SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: absent
+ vserver: "SVM1"
+ policy_type: "async_mirror"
+ policy_name: "ansible_policy"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSnapMirrorPolicy(object):
+ """
+ Create, Modifies and Destroys a SnapMirror policy
+ """
    def __init__(self):
        """
        Initialize the Ontap SnapMirror policy class

        Declares the argument spec, decides between the REST and ZAPI
        interfaces based on ONTAP capabilities and the parameters in use,
        and sets up the server connection for the ZAPI path.
        """

        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            policy_name=dict(required=True, type='str'),
            comment=dict(required=False, type='str'),
            policy_type=dict(required=False, type='str',
                             choices=['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror']),
            tries=dict(required=False, type='str'),
            transfer_priority=dict(required=False, type='str', choices=['low', 'normal']),
            common_snapshot_schedule=dict(required=False, type='str'),
            ignore_atime=dict(required=False, type='bool'),
            is_network_compression_enabled=dict(required=False, type='bool'),
            owner=dict(required=False, type='str', choices=['cluster_admin', 'vserver_admin']),
            restart=dict(required=False, type='str', choices=['always', 'never', 'default']),
            snapmirror_label=dict(required=False, type="list", elements="str"),
            keep=dict(required=False, type="list", elements="int"),
            prefix=dict(required=False, type="list", elements="str"),
            schedule=dict(required=False, type="list", elements="str"),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # API should be used for ONTAP 9.6 or higher, Zapi for lower version
        self.rest_api = OntapRestAPI(self.module)
        # some attributes are not supported in earlier REST implementation
        unsupported_rest_properties = ['owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime',
                                       'common_snapshot_schedule']
        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
        # is_rest() returns False (forcing ZAPI) when any unsupported property is in use
        self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)

        if error:
            self.module.fail_json(msg=error)
        if not self.use_rest:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(msg='The python NetApp-Lib module is required')
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_snapmirror_policy(self):
+
+ if self.use_rest:
+ data = {'fields': 'uuid,name,svm.name,comment,network_compression_enabled,type,retention',
+ 'name': self.parameters['policy_name'],
+ 'svm.name': self.parameters['vserver']}
+ api = "snapmirror/policies"
+ message, error = self.rest_api.get(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message['records']) != 0:
+ return_value = {
+ 'uuid': message['records'][0]['uuid'],
+ 'vserver': message['records'][0]['svm']['name'],
+ 'policy_name': message['records'][0]['name'],
+ 'comment': '',
+ 'is_network_compression_enabled': message['records'][0]['network_compression_enabled'],
+ 'snapmirror_label': list(),
+ 'keep': list(),
+ 'prefix': list(),
+ 'schedule': list()
+ }
+ if 'type' in message['records'][0]:
+ policy_type = message['records'][0]['type']
+ if policy_type == 'async':
+ policy_type = 'async_mirror'
+ elif policy_type == 'sync':
+ policy_type = 'sync_mirror'
+ return_value['policy_type'] = policy_type
+ if 'comment' in message['records'][0]:
+ return_value['comment'] = message['records'][0]['comment']
+ if 'retention' in message['records'][0]:
+ for rule in message['records'][0]['retention']:
+ return_value['snapmirror_label'].append(rule['label'])
+ return_value['keep'].append(int(rule['count']))
+ if rule['prefix'] == '-':
+ return_value['prefix'].append('')
+ else:
+ return_value['prefix'].append(rule['prefix'])
+ if rule['creation_schedule']['name'] == '-':
+ return_value['schedule'].append('')
+ else:
+ return_value['schedule'].append(rule['creation_schedule']['name'])
+ return return_value
+ return None
+ else:
+ return_value = None
+
+ snapmirror_policy_get_iter = netapp_utils.zapi.NaElement('snapmirror-policy-get-iter')
+ snapmirror_policy_info = netapp_utils.zapi.NaElement('snapmirror-policy-info')
+ snapmirror_policy_info.add_new_child('policy-name', self.parameters['policy_name'])
+ snapmirror_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(snapmirror_policy_info)
+ snapmirror_policy_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(snapmirror_policy_get_iter, True)
+ if result.get_child_by_name('attributes-list'):
+ snapmirror_policy_attributes = result['attributes-list']['snapmirror-policy-info']
+
+ return_value = {
+ 'policy_name': snapmirror_policy_attributes['policy-name'],
+ 'tries': snapmirror_policy_attributes['tries'],
+ 'transfer_priority': snapmirror_policy_attributes['transfer-priority'],
+ 'is_network_compression_enabled': self.na_helper.get_value_for_bool(True,
+ snapmirror_policy_attributes['is-network-compression-enabled']),
+ 'restart': snapmirror_policy_attributes['restart'],
+ 'ignore_atime': self.na_helper.get_value_for_bool(True, snapmirror_policy_attributes['ignore-atime']),
+ 'vserver': snapmirror_policy_attributes['vserver-name'],
+ 'comment': '',
+ 'snapmirror_label': list(),
+ 'keep': list(),
+ 'prefix': list(),
+ 'schedule': list()
+ }
+ if snapmirror_policy_attributes.get_child_content('comment') is not None:
+ return_value['comment'] = snapmirror_policy_attributes['comment']
+
+ if snapmirror_policy_attributes.get_child_content('type') is not None:
+ return_value['policy_type'] = snapmirror_policy_attributes['type']
+
+ if snapmirror_policy_attributes.get_child_by_name('snapmirror-policy-rules'):
+ for rule in snapmirror_policy_attributes['snapmirror-policy-rules'].get_children():
+ # Ignore builtin rules
+ if rule.get_child_content('snapmirror-label') == "sm_created" or \
+ rule.get_child_content('snapmirror-label') == "all_source_snapshots":
+ continue
+
+ return_value['snapmirror_label'].append(rule.get_child_content('snapmirror-label'))
+ return_value['keep'].append(int(rule.get_child_content('keep')))
+
+ prefix = rule.get_child_content('prefix')
+ if prefix is None or prefix == '-':
+ prefix = ''
+ return_value['prefix'].append(prefix)
+
+ schedule = rule.get_child_content('schedule')
+ if schedule is None or schedule == '-':
+ schedule = ''
+ return_value['schedule'].append(schedule)
+
+ except netapp_utils.zapi.NaApiError as error:
+ if 'NetApp API failed. Reason - 13001:' in to_native(error):
+ # Policy does not exist
+ pass
+ else:
+ self.module.fail_json(msg='Error getting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return return_value
+
+ def validate_parameters(self):
+ """
+ Validate snapmirror policy rules
+ :return: None
+ """
+
+ # For snapmirror policy rules, 'snapmirror_label' is required.
+ if 'snapmirror_label' in self.parameters:
+
+ # Check size of 'snapmirror_label' list is 0-10. Can have zero rules.
+ # Take builtin 'sm_created' rule into account for 'mirror_vault'.
+ if (('policy_type' in self.parameters and
+ self.parameters['policy_type'] == 'mirror_vault' and
+ len(self.parameters['snapmirror_label']) > 9) or
+ len(self.parameters['snapmirror_label']) > 10):
+ self.module.fail_json(msg="Error: A SnapMirror Policy can have up to a maximum of "
+ "10 rules (including builtin rules), with a 'keep' value "
+ "representing the maximum number of Snapshot copies for each rule")
+
+ # 'keep' must be supplied as long as there is at least one snapmirror_label
+ if len(self.parameters['snapmirror_label']) > 0 and 'keep' not in self.parameters:
+ self.module.fail_json(msg="Error: Missing 'keep' parameter. When specifying the "
+ "'snapmirror_label' parameter, the 'keep' parameter must "
+ "also be supplied")
+
+ # Make sure other rule values match same number of 'snapmirror_label' values.
+ for rule_parameter in ['keep', 'prefix', 'schedule']:
+ if rule_parameter in self.parameters:
+ if len(self.parameters['snapmirror_label']) > len(self.parameters[rule_parameter]):
+ self.module.fail_json(msg="Error: Each 'snapmirror_label' value must have "
+ "an accompanying '%s' value" % rule_parameter)
+ if len(self.parameters[rule_parameter]) > len(self.parameters['snapmirror_label']):
+ self.module.fail_json(msg="Error: Each '%s' value must have an accompanying "
+ "'snapmirror_label' value" % rule_parameter)
+ else:
+ # 'snapmirror_label' not supplied.
+ # Bail out if other rule parameters have been supplied.
+ for rule_parameter in ['keep', 'prefix', 'schedule']:
+ if rule_parameter in self.parameters:
+ self.module.fail_json(msg="Error: Missing 'snapmirror_label' parameter. When "
+ "specifying the '%s' parameter, the 'snapmirror_label' "
+ "parameter must also be supplied" % rule_parameter)
+
+ # Schedule must be supplied if prefix is supplied.
+ if 'prefix' in self.parameters and 'schedule' not in self.parameters:
+ self.module.fail_json(msg="Error: Missing 'schedule' parameter. When "
+ "specifying the 'prefix' parameter, the 'schedule' "
+ "parameter must also be supplied")
+
+ def create_snapmirror_policy(self):
+ """
+        Creates a new snapmirror policy
+ """
+ self.validate_parameters()
+ if self.use_rest:
+ data = {'name': self.parameters['policy_name'],
+ 'svm': {'name': self.parameters['vserver']}}
+ if 'policy_type' in self.parameters.keys():
+ if 'async_mirror' in self.parameters['policy_type']:
+ data['type'] = 'async'
+ elif 'sync_mirror' in self.parameters['policy_type']:
+ data['type'] = 'sync'
+ data['sync_type'] = 'sync'
+ else:
+ self.module.fail_json(msg='policy type in REST only supports options async_mirror or sync_mirror, given %s'
+ % (self.parameters['policy_type']))
+ data = self.create_snapmirror_policy_obj_for_rest(data, data['type'])
+ else:
+ data = self.create_snapmirror_policy_obj_for_rest(data)
+ api = "snapmirror/policies"
+ response, error = self.rest_api.post(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'job' in response:
+ message, error = self.rest_api.wait_on_job(response['job'], increment=5)
+ if error:
+ self.module.fail_json(msg="%s" % error)
+ else:
+ snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-create")
+ snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+ if 'policy_type' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("type", self.parameters['policy_type'])
+ snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj)
+
+ try:
+ self.server.invoke_successfully(snapmirror_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_snapmirror_policy_obj(self, snapmirror_policy_obj):
+ if 'comment' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("comment", self.parameters['comment'])
+ if 'common_snapshot_schedule' in self.parameters.keys() and 'sync_mirror' in self.parameters['policy_type']:
+ snapmirror_policy_obj.add_new_child("common-snapshot-schedule", self.parameters['common_snapshot_schedule'])
+ if 'ignore_atime' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("ignore-atime", self.na_helper.get_value_for_bool(False, self.parameters['ignore_atime']))
+ if 'is_network_compression_enabled' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("is-network-compression-enabled",
+ self.na_helper.get_value_for_bool(False, self.parameters['is_network_compression_enabled']))
+ if 'owner' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("owner", self.parameters['owner'])
+ if 'restart' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("restart", self.parameters['restart'])
+ if 'transfer_priority' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("transfer-priority", self.parameters['transfer_priority'])
+ if 'tries' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("tries", self.parameters['tries'])
+ return snapmirror_policy_obj
+
+ def create_snapmirror_policy_obj_for_rest(self, snapmirror_policy_obj, policy_type=None):
+ if 'comment' in self.parameters.keys():
+ snapmirror_policy_obj["comment"] = self.parameters['comment']
+ if 'is_network_compression_enabled' in self.parameters:
+ if policy_type == 'async':
+ snapmirror_policy_obj["network_compression_enabled"] = self.parameters['is_network_compression_enabled']
+ elif policy_type == 'sync':
+ self.module.fail_json(msg="Input parameter network_compression_enabled is not valid for SnapMirror policy type sync")
+ return snapmirror_policy_obj
+
+ def create_snapmirror_policy_retention_obj_for_rest(self, rules=None):
+ """
+ Create SnapMirror policy retention REST object.
+ :param list rules: e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ]
+ :return: List of retention REST objects.
+ e.g. [{'label': 'daily', 'count': 7, 'prefix': 'daily', 'creation_schedule': {'name': 'daily'}}, ... ]
+ """
+ snapmirror_policy_retention_objs = list()
+ if rules is not None:
+ for rule in rules:
+ retention = {'label': rule['snapmirror_label'], 'count': str(rule['keep'])}
+ if 'prefix' in rule and rule['prefix'] != '':
+ retention['prefix'] = rule['prefix']
+ if 'schedule' in rule and rule['schedule'] != '':
+ retention['creation_schedule'] = {'name': rule['schedule']}
+ snapmirror_policy_retention_objs.append(retention)
+ return snapmirror_policy_retention_objs
+
+ def delete_snapmirror_policy(self, uuid=None):
+ """
+ Deletes a snapmirror policy
+ """
+ if self.use_rest:
+ api = "snapmirror/policies/%s" % uuid
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-delete")
+ snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+
+ try:
+ self.server.invoke_successfully(snapmirror_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_snapmirror_policy(self, uuid=None, policy_type=None):
+ """
+ Modifies a snapmirror policy
+ """
+ if self.use_rest:
+ api = "snapmirror/policies/" + uuid
+ data = self.create_snapmirror_policy_obj_for_rest(dict(), policy_type)
+ dummy, error = self.rest_api.patch(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-modify")
+ snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj)
+ # Only modify snapmirror policy if a specific snapmirror policy attribute needs
+ # modifying. It may be that only snapmirror policy rules are being modified.
+ if snapmirror_policy_obj.get_children():
+ snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+
+ try:
+ self.server.invoke_successfully(snapmirror_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def identify_new_snapmirror_policy_rules(self, current=None):
+ """
+ Identify new rules that should be added.
+ :return: List of new rules to be added
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ new_rules = list()
+ if 'snapmirror_label' in self.parameters:
+ for snapmirror_label in self.parameters['snapmirror_label']:
+ snapmirror_label = snapmirror_label.strip()
+
+ # Construct new rule. prefix and schedule are optional.
+ snapmirror_label_index = self.parameters['snapmirror_label'].index(snapmirror_label)
+ rule = dict({
+ 'snapmirror_label': snapmirror_label,
+ 'keep': self.parameters['keep'][snapmirror_label_index]
+ })
+ if 'prefix' in self.parameters:
+ rule['prefix'] = self.parameters['prefix'][snapmirror_label_index]
+ else:
+ rule['prefix'] = ''
+ if 'schedule' in self.parameters:
+ rule['schedule'] = self.parameters['schedule'][snapmirror_label_index]
+ else:
+ rule['schedule'] = ''
+
+ if current is not None and 'snapmirror_label' in current:
+ if snapmirror_label not in current['snapmirror_label']:
+ # Rule doesn't exist. Add new rule.
+ new_rules.append(rule)
+ else:
+ # No current or any rules. Add new rule.
+ new_rules.append(rule)
+ return new_rules
+
+ def identify_obsolete_snapmirror_policy_rules(self, current=None):
+ """
+ Identify existing rules that should be deleted
+ :return: List of rules to be deleted
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ obsolete_rules = list()
+ if 'snapmirror_label' in self.parameters:
+ if current is not None and 'snapmirror_label' in current:
+ # Iterate existing rules.
+ for snapmirror_label in current['snapmirror_label']:
+ snapmirror_label = snapmirror_label.strip()
+ if snapmirror_label not in [item.strip() for item in self.parameters['snapmirror_label']]:
+ # Existing rule isn't in parameters. Delete existing rule.
+ current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label)
+ rule = dict({
+ 'snapmirror_label': snapmirror_label,
+ 'keep': current['keep'][current_snapmirror_label_index],
+ 'prefix': current['prefix'][current_snapmirror_label_index],
+ 'schedule': current['schedule'][current_snapmirror_label_index]
+ })
+ obsolete_rules.append(rule)
+ return obsolete_rules
+
+ def identify_modified_snapmirror_policy_rules(self, current=None):
+ """
+ Identify self.parameters rules that will be modified or not.
+ :return: List of 'modified' rules and a list of 'unmodified' rules
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ modified_rules = list()
+ unmodified_rules = list()
+ if 'snapmirror_label' in self.parameters:
+ for snapmirror_label in self.parameters['snapmirror_label']:
+ snapmirror_label = snapmirror_label.strip()
+ if current is not None and 'snapmirror_label' in current:
+ if snapmirror_label in current['snapmirror_label']:
+ # Rule exists. Identify whether it requires modification or not.
+ modified = False
+ rule = dict()
+ rule['snapmirror_label'] = snapmirror_label
+
+ # Get indexes of current and supplied rule.
+ current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label)
+ snapmirror_label_index = self.parameters['snapmirror_label'].index(snapmirror_label)
+
+ # Check if keep modified
+ if self.parameters['keep'][snapmirror_label_index] != current['keep'][current_snapmirror_label_index]:
+ modified = True
+ rule['keep'] = self.parameters['keep'][snapmirror_label_index]
+ else:
+ rule['keep'] = current['keep'][current_snapmirror_label_index]
+
+ # Check if prefix modified
+ if 'prefix' in self.parameters:
+ if self.parameters['prefix'][snapmirror_label_index] != current['prefix'][current_snapmirror_label_index]:
+ modified = True
+ rule['prefix'] = self.parameters['prefix'][snapmirror_label_index]
+ else:
+ rule['prefix'] = current['prefix'][current_snapmirror_label_index]
+ else:
+ rule['prefix'] = current['prefix'][current_snapmirror_label_index]
+
+ # Check if schedule modified
+ if 'schedule' in self.parameters:
+ if self.parameters['schedule'][snapmirror_label_index] != current['schedule'][current_snapmirror_label_index]:
+ modified = True
+ rule['schedule'] = self.parameters['schedule'][snapmirror_label_index]
+ else:
+ rule['schedule'] = current['schedule'][current_snapmirror_label_index]
+ else:
+ rule['schedule'] = current['schedule'][current_snapmirror_label_index]
+
+ if modified:
+ modified_rules.append(rule)
+ else:
+ unmodified_rules.append(rule)
+ return modified_rules, unmodified_rules
+
+ def identify_snapmirror_policy_rules_with_schedule(self, rules=None):
+ """
+ Identify rules that are using a schedule or not. At least one
+ non-schedule rule must be added to a policy before schedule rules
+ are added.
+ :return: List of rules with schedules and a list of rules without schedules
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ],
+ [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ schedule_rules = list()
+ non_schedule_rules = list()
+ if rules is not None:
+ for rule in rules:
+ if 'schedule' in rule:
+ schedule_rules.append(rule)
+ else:
+ non_schedule_rules.append(rule)
+ return schedule_rules, non_schedule_rules
+
+ def modify_snapmirror_policy_rules(self, current=None, uuid=None):
+ """
+ Modify existing rules in snapmirror policy
+ :return: None
+ """
+ self.validate_parameters()
+
+ # Need 'snapmirror_label' to add/modify/delete rules
+ if 'snapmirror_label' not in self.parameters:
+ return
+
+ obsolete_rules = self.identify_obsolete_snapmirror_policy_rules(current)
+ new_rules = self.identify_new_snapmirror_policy_rules(current)
+ modified_rules, unmodified_rules = self.identify_modified_snapmirror_policy_rules(current)
+
+ if self.use_rest:
+ api = "snapmirror/policies/" + uuid
+ data = {'retention': list()}
+
+ # As rule 'prefix' can't be unset, have to delete existing rules first.
+ # Builtin rules remain.
+ dummy, error = self.rest_api.patch(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+
+ # Re-add desired rules.
+ rules = unmodified_rules + modified_rules + new_rules
+ data['retention'] = self.create_snapmirror_policy_retention_obj_for_rest(rules)
+
+ if len(data['retention']) > 0:
+ dummy, error = self.rest_api.patch(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ delete_rules = obsolete_rules + modified_rules
+ add_schedule_rules, add_non_schedule_rules = self.identify_snapmirror_policy_rules_with_schedule(new_rules + modified_rules)
+ # Delete rules no longer required or modified rules that will be re-added.
+ for rule in delete_rules:
+ options = {'policy-name': self.parameters['policy_name'],
+ 'snapmirror-label': rule['snapmirror_label']}
+ self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-remove-rule')
+
+ # Add rules. At least one non-schedule rule must exist before
+ # a rule with a schedule can be added, otherwise zapi will complain.
+ for rule in add_non_schedule_rules + add_schedule_rules:
+ options = {'policy-name': self.parameters['policy_name'],
+ 'snapmirror-label': rule['snapmirror_label'],
+ 'keep': str(rule['keep'])}
+ if 'prefix' in rule and rule['prefix'] != '':
+ options['prefix'] = rule['prefix']
+ if 'schedule' in rule and rule['schedule'] != '':
+ options['schedule'] = rule['schedule']
+ self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-add-rule')
+
+ def modify_snapmirror_policy_rule(self, options, zapi):
+ """
+ Add, modify or remove a rule to/from a snapmirror policy
+ """
+ snapmirror_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(snapmirror_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapmirror policy rule %s: %s' %
+ (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_snapmirror_policy", cserver)
+
+ def apply(self):
+ uuid = None
+ if not self.use_rest:
+ self.asup_log_for_cserver()
+ current, modify = self.get_snapmirror_policy(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if current and cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if 'policy_type' in modify:
+ self.module.fail_json(msg='Error: policy type cannot be changed: current=%s, expected=%s' %
+ (current.get('policy_type'), modify['policy_type']))
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_snapmirror_policy()
+ if self.use_rest:
+ current = self.get_snapmirror_policy()
+ uuid = current['uuid']
+ self.modify_snapmirror_policy_rules(current, uuid)
+ else:
+ self.modify_snapmirror_policy_rules(current)
+ elif cd_action == 'delete':
+ if self.use_rest:
+ uuid = current['uuid']
+ self.delete_snapmirror_policy(uuid)
+ elif modify:
+ if self.use_rest:
+ uuid = current['uuid']
+ self.modify_snapmirror_policy(uuid, current['policy_type'])
+ self.modify_snapmirror_policy_rules(current, uuid)
+ else:
+ self.modify_snapmirror_policy()
+ self.modify_snapmirror_policy_rules(current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap SnapMirror policy object and runs the correct play task
+ """
+ obj = NetAppOntapSnapMirrorPolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py
new file mode 100644
index 00000000..fdc32b5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py
@@ -0,0 +1,333 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_snapshot
+short_description: NetApp ONTAP manage Snapshots
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete ONTAP snapshots
+options:
+ state:
+ description:
+ - If you want to create/modify a snapshot, or delete it.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ snapshot:
+ description:
+ Name of the snapshot to be managed.
+ The maximum string length is 256 characters.
+ required: true
+ type: str
+ from_name:
+ description:
+    - Name of the existing snapshot to be renamed.
+ version_added: 2.8.0
+ type: str
+ volume:
+ description:
+ - Name of the volume on which the snapshot is to be created.
+ required: true
+ type: str
+ async_bool:
+ description:
+ - If true, the snapshot is to be created asynchronously.
+ type: bool
+ comment:
+ description:
+ A human readable comment attached with the snapshot.
+ The size of the comment can be at most 255 characters.
+ type: str
+ snapmirror_label:
+ description:
+ A human readable SnapMirror Label attached with the snapshot.
+ Size of the label can be at most 31 characters.
+ type: str
+ ignore_owners:
+ description:
+ - if this field is true, snapshot will be deleted
+ even if some other processes are accessing it.
+ type: bool
+ snapshot_instance_uuid:
+ description:
+ - The 128 bit unique snapshot identifier expressed in the form of UUID.
+ type: str
+ vserver:
+ description:
+ - The Vserver name
+ required: true
+ type: str
+'''
+EXAMPLES = """
+ - name: create SnapShot
+ tags:
+ - create
+ na_ontap_snapshot:
+ state: present
+ snapshot: "{{ snapshot name }}"
+ volume: "{{ vol name }}"
+ comment: "i am a comment"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ - name: delete SnapShot
+ tags:
+ - delete
+ na_ontap_snapshot:
+ state: absent
+ snapshot: "{{ snapshot name }}"
+ volume: "{{ vol name }}"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ - name: modify SnapShot
+ tags:
+ - modify
+ na_ontap_snapshot:
+ state: present
+ snapshot: "{{ snapshot name }}"
+ comment: "New comments are great"
+ volume: "{{ vol name }}"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSnapshot(object):
+ """
+ Creates, modifies, and deletes a Snapshot
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ from_name=dict(required=False, type='str'),
+ snapshot=dict(required=True, type="str"),
+ volume=dict(required=True, type="str"),
+ async_bool=dict(required=False, type="bool", default=False),
+ comment=dict(required=False, type="str"),
+ snapmirror_label=dict(required=False, type="str"),
+ ignore_owners=dict(required=False, type="bool", default=False),
+ snapshot_instance_uuid=dict(required=False, type="str"),
+ vserver=dict(required=True, type="str"),
+
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def get_snapshot(self, snapshot_name=None):
+ """
+ Checks to see if a snapshot exists or not
+ :return: Return True if a snapshot exists, False if it doesn't
+ """
+ if snapshot_name is None:
+ snapshot_name = self.parameters['snapshot']
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
+ desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
+ snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
+ comment = netapp_utils.zapi.NaElement('comment')
+ snapmirror_label = netapp_utils.zapi.NaElement('snapmirror-label')
+ # add more desired attributes that are allowed to be modified
+ snapshot_info.add_child_elem(comment)
+ snapshot_info.add_child_elem(snapmirror_label)
+ desired_attr.add_child_elem(snapshot_info)
+ snapshot_obj.add_child_elem(desired_attr)
+ # compose query
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", snapshot_name)
+ snapshot_info_obj.add_new_child("volume", self.parameters['volume'])
+ snapshot_info_obj.add_new_child("vserver", self.parameters['vserver'])
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+ result = self.server.invoke_successfully(snapshot_obj, True)
+ return_value = None
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ snap_info = attributes_list.get_child_by_name('snapshot-info')
+ return_value = {'comment': snap_info.get_child_content('comment')}
+ if snap_info.get_child_by_name('snapmirror-label'):
+ return_value['snapmirror_label'] = snap_info.get_child_content('snapmirror-label')
+ else:
+ return_value['snapmirror_label'] = None
+ return return_value
+
+ def create_snapshot(self):
+ """
+ Creates a new snapshot
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-create")
+
+ # set up required variables to create a snapshot
+ snapshot_obj.add_new_child("snapshot", self.parameters['snapshot'])
+ snapshot_obj.add_new_child("volume", self.parameters['volume'])
+ # Set up optional variables to create a snapshot
+ if self.parameters.get('async_bool'):
+ snapshot_obj.add_new_child("async", str(self.parameters['async_bool']))
+ if self.parameters.get('comment'):
+ snapshot_obj.add_new_child("comment", self.parameters['comment'])
+ if self.parameters.get('snapmirror_label'):
+ snapshot_obj.add_new_child(
+ "snapmirror-label", self.parameters['snapmirror_label'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating snapshot %s: %s' %
+ (self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_snapshot(self):
+ """
+ Deletes an existing snapshot
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-delete")
+
+ # Set up required variables to delete a snapshot
+ snapshot_obj.add_new_child("snapshot", self.parameters['snapshot'])
+ snapshot_obj.add_new_child("volume", self.parameters['volume'])
+ # set up optional variables to delete a snapshot
+ if self.parameters.get('ignore_owners'):
+ snapshot_obj.add_new_child("ignore-owners", str(self.parameters['ignore_owners']))
+ if self.parameters.get('snapshot_instance_uuid'):
+ snapshot_obj.add_new_child("snapshot-instance-uuid", self.parameters['snapshot_instance_uuid'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting snapshot %s: %s' %
+ (self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_snapshot(self):
+ """
+ Modify an existing snapshot
+ :return:
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-modify-iter")
+ # Create query object, this is the existing object
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", self.parameters['snapshot'])
+ snapshot_info_obj.add_new_child("vserver", self.parameters['vserver'])
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+
+ # this is what we want to modify in the snapshot object
+ attributes = netapp_utils.zapi.NaElement("attributes")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", self.parameters['snapshot'])
+ if self.parameters.get('comment'):
+ snapshot_info_obj.add_new_child("comment", self.parameters['comment'])
+ if self.parameters.get('snapmirror_label'):
+ snapshot_info_obj.add_new_child("snapmirror-label", self.parameters['snapmirror_label'])
+ attributes.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(attributes)
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapshot %s: %s' %
+ (self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_snapshot(self):
+ """
+        Rename the snapshot
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-rename")
+
+ # set up required variables to rename a snapshot
+ snapshot_obj.add_new_child("current-name", self.parameters['from_name'])
+ snapshot_obj.add_new_child("new-name", self.parameters['snapshot'])
+ snapshot_obj.add_new_child("volume", self.parameters['volume'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming snapshot %s to %s: %s' %
+ (self.parameters['from_name'], self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check to see which play we should run
+ """
+ current = self.get_snapshot()
+ netapp_utils.ems_log_event("na_ontap_snapshot", self.server)
+ rename, cd_action = None, None
+ modify = {}
+ if self.parameters.get('from_name'):
+ current_old_name = self.get_snapshot(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(current_old_name, current)
+ modify = self.na_helper.get_modified_attributes(current_old_name, self.parameters)
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_snapshot()
+ if cd_action == 'create':
+ self.create_snapshot()
+ elif cd_action == 'delete':
+ self.delete_snapshot()
+ elif modify:
+ self.modify_snapshot()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates, modifies, and deletes a Snapshot
+ """
+ obj = NetAppOntapSnapshot()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py
new file mode 100644
index 00000000..ac9cd674
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py
@@ -0,0 +1,500 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_snapshot_policy
+short_description: NetApp ONTAP manage Snapshot Policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete ONTAP snapshot policies
+options:
+ state:
+ description:
+ - If you want to create, modify or delete a snapshot policy.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ Name of the snapshot policy to be managed.
+ The maximum string length is 256 characters.
+ required: true
+ type: str
+ enabled:
+ description:
+ - Status of the snapshot policy indicating whether the policy will be enabled or disabled.
+ type: bool
+ comment:
+ description:
+ A human readable comment attached with the snapshot.
+ The size of the comment can be at most 255 characters.
+ type: str
+ count:
+ description:
+ Retention count for the snapshots created by the schedule.
+ type: list
+ elements: int
+ schedule:
+ description:
+ - Schedule to be added inside the policy.
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Snapshot name prefix for the schedule.
+ - Prefix name should be unique within the policy.
+ - Cannot set a different prefix to a schedule that has already been assigned to a snapshot policy.
+    - Prefix cannot be modified after schedule has been added.
+ type: list
+ elements: str
+ required: false
+ version_added: '19.10.1'
+ snapmirror_label:
+ description:
+ - SnapMirror label assigned to each schedule inside the policy. Use an empty
+ string ('') for no label.
+ type: list
+ elements: str
+ required: false
+ version_added: 2.9.0
+ vserver:
+ description:
+ - The name of the vserver to use. In a multi-tenanted environment, assigning a
+ Snapshot Policy to a vserver will restrict its use to that vserver.
+ required: false
+ type: str
+ version_added: 2.9.0
+'''
+EXAMPLES = """
+ - name: Create Snapshot policy
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: hourly
+ prefix: hourly
+ count: 150
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Create Snapshot policy with multiple schedules
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ count: [1, 2, 3, 4, 5]
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Create Snapshot policy owned by a vserver
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible3
+ vserver: ansible
+ schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ count: [1, 2, 3, 4, 5]
+ snapmirror_label: ['hourly', 'daily', 'weekly', 'monthly', '']
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Modify Snapshot policy with multiple schedules
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: ['daily', 'weekly']
+ count: [20, 30]
+ snapmirror_label: ['daily', 'weekly']
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Delete Snapshot policy
+ na_ontap_snapshot_policy:
+ state: absent
+ name: ansible2
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSnapshotPolicy(object):
+    """
+    Creates and deletes a Snapshot Policy
+
+    Manages ONTAP snapshot policies over ZAPI: create, delete, modify
+    policy attributes (enabled/comment) and add/modify/remove the
+    per-policy schedules with their counts, prefixes and SnapMirror labels.
+    """
+
+    def __init__(self):
+        # Build the argument spec on top of the shared ONTAP connection options.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type="str"),
+            enabled=dict(required=False, type="bool"),
+            # count is a list of integers
+            count=dict(required=False, type="list", elements="int"),
+            comment=dict(required=False, type="str"),
+            schedule=dict(required=False, type="list", elements="str"),
+            prefix=dict(required=False, type="list", elements="str"),
+            snapmirror_label=dict(required=False, type="list", elements="str"),
+            vserver=dict(required=False, type="str")
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_if=[
+                ('state', 'present', ['enabled', 'count', 'schedule']),
+            ],
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(
+                msg="the python NetApp-Lib module is required")
+        else:
+            # With a vserver, the policy is scoped (tunneled) to that vserver;
+            # otherwise operate at cluster scope.
+            if 'vserver' in self.parameters:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        return
+
+    def get_snapshot_policy(self):
+        """
+        Checks to see if a snapshot policy exists or not
+        :return: Return policy details if a snapshot policy exists, None if it doesn't
+        """
+        snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-get-iter")
+        # compose query
+        query = netapp_utils.zapi.NaElement("query")
+        snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-policy-info")
+        snapshot_info_obj.add_new_child("policy", self.parameters['name'])
+        if 'vserver' in self.parameters:
+            snapshot_info_obj.add_new_child("vserver-name", self.parameters['vserver'])
+        query.add_child_elem(snapshot_info_obj)
+        snapshot_obj.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(snapshot_obj, True)
+            # Exactly one record means the policy exists; build a flat dict of
+            # its attributes mirroring the module's parameter names.
+            if result.get_child_by_name('num-records') and \
+                    int(result.get_child_content('num-records')) == 1:
+                snapshot_policy = result.get_child_by_name('attributes-list').get_child_by_name('snapshot-policy-info')
+                current = {}
+                current['name'] = snapshot_policy.get_child_content('policy')
+                current['vserver'] = snapshot_policy.get_child_content('vserver-name')
+                # NOTE(review): assumes 'enabled' is always present in the reply;
+                # .lower() would raise AttributeError if it were missing — confirm.
+                current['enabled'] = False if snapshot_policy.get_child_content('enabled').lower() == 'false' else True
+                current['comment'] = snapshot_policy.get_child_content('comment') or ''
+                current['schedule'], current['count'], current['snapmirror_label'], current['prefix'] = [], [], [], []
+                if snapshot_policy.get_child_by_name('snapshot-policy-schedules'):
+                    # Collect per-schedule settings in parallel lists, index-aligned.
+                    for schedule in snapshot_policy['snapshot-policy-schedules'].get_children():
+                        current['schedule'].append(schedule.get_child_content('schedule'))
+                        current['count'].append(int(schedule.get_child_content('count')))
+
+                        # ONTAP reports '-' (or nothing) for an unset label/prefix;
+                        # normalize both to the empty string used by this module.
+                        snapmirror_label = schedule.get_child_content('snapmirror-label')
+                        if snapmirror_label is None or snapmirror_label == '-':
+                            snapmirror_label = ''
+                        current['snapmirror_label'].append(snapmirror_label)
+
+                        prefix = schedule.get_child_content('prefix')
+                        if prefix is None or prefix == '-':
+                            prefix = ''
+                        current['prefix'].append(prefix)
+                return current
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+        return None
+
+    def validate_parameters(self):
+        """
+        Validate if each schedule has a count associated
+        :return: None
+        """
+        # A policy must carry 1..5 schedules and the count list must be the
+        # same length so they can be zipped positionally.
+        if 'count' not in self.parameters or 'schedule' not in self.parameters or \
+                len(self.parameters['count']) > 5 or len(self.parameters['schedule']) > 5 or \
+                len(self.parameters['count']) < 1 or len(self.parameters['schedule']) < 1 or \
+                len(self.parameters['count']) != len(self.parameters['schedule']):
+            self.module.fail_json(msg="Error: A Snapshot policy must have at least 1 "
+                                      "schedule and can have up to a maximum of 5 schedules, with a count "
+                                      "representing the maximum number of Snapshot copies for each schedule")
+
+        # Optional lists, when supplied, must also align 1:1 with schedules.
+        if 'snapmirror_label' in self.parameters:
+            if len(self.parameters['snapmirror_label']) != len(self.parameters['schedule']):
+                self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an "
+                                          "accompanying SnapMirror Label")
+
+        if 'prefix' in self.parameters:
+            if len(self.parameters['prefix']) != len(self.parameters['schedule']):
+                self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an "
+                                          "accompanying prefix")
+
+    def modify_snapshot_policy(self, current):
+        """
+        Modifies an existing snapshot policy
+
+        Only the policy-level attributes (enabled, comment) are handled here;
+        schedule changes go through modify_snapshot_policy_schedules().
+        :param current: dict of the policy's current attributes
+        """
+        # Set up required variables to modify snapshot policy
+        options = {'policy': self.parameters['name']}
+        modify = False
+
+        # Set up optional variables to modify snapshot policy
+        if 'enabled' in self.parameters and self.parameters['enabled'] != current['enabled']:
+            options['enabled'] = str(self.parameters['enabled'])
+            modify = True
+        if 'comment' in self.parameters and self.parameters['comment'] != current['comment']:
+            options['comment'] = self.parameters['comment']
+            modify = True
+
+        # Skip the ZAPI call entirely when nothing differs.
+        if modify:
+            snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-modify', **options)
+            try:
+                self.server.invoke_successfully(snapshot_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error modifying snapshot policy %s: %s' %
+                                      (self.parameters['name'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def modify_snapshot_policy_schedules(self, current):
+        """
+        Modify existing schedules in snapshot policy
+
+        Computes the delta between the desired and current schedule lists and
+        applies it as remove/modify/add ZAPI calls, ordered so the policy
+        never drops to zero schedules.
+        :param current: dict of the policy's current attributes
+        :return: None
+        """
+        self.validate_parameters()
+
+        delete_schedules, modify_schedules, add_schedules = [], [], []
+
+        if 'snapmirror_label' in self.parameters:
+            snapmirror_labels = self.parameters['snapmirror_label']
+        else:
+            # User hasn't supplied any snapmirror labels.
+            snapmirror_labels = [None] * len(self.parameters['schedule'])
+
+        # Identify schedules for deletion
+        for schedule in current['schedule']:
+            schedule = schedule.strip()
+            if schedule not in [item.strip() for item in self.parameters['schedule']]:
+                options = {'policy': current['name'],
+                           'schedule': schedule}
+                delete_schedules.append(options)
+
+        # Identify schedules to be modified or added
+        for schedule, count, snapmirror_label in zip(self.parameters['schedule'], self.parameters['count'], snapmirror_labels):
+            schedule = schedule.strip()
+            if snapmirror_label is not None:
+                snapmirror_label = snapmirror_label.strip()
+
+            options = {'policy': current['name'],
+                       'schedule': schedule}
+
+            if schedule in current['schedule']:
+                # Schedule exists. Only modify if it has changed.
+                modify = False
+                schedule_index = current['schedule'].index(schedule)
+
+                if count != current['count'][schedule_index]:
+                    options['new-count'] = str(count)
+                    modify = True
+
+                if snapmirror_label is not None:
+                    if snapmirror_label != current['snapmirror_label'][schedule_index]:
+                        options['new-snapmirror-label'] = snapmirror_label
+                        modify = True
+
+                if modify:
+                    modify_schedules.append(options)
+            else:
+                # New schedule
+                options['count'] = str(count)
+                if snapmirror_label is not None and snapmirror_label != '':
+                    options['snapmirror-label'] = snapmirror_label
+                add_schedules.append(options)
+
+        # Delete N-1 schedules no longer required. Must leave 1 schedule in policy
+        # at any one time. Delete last one afterwards.
+        while len(delete_schedules) > 1:
+            options = delete_schedules.pop()
+            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule')
+
+        # Modify schedules.
+        while len(modify_schedules) > 0:
+            options = modify_schedules.pop()
+            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-modify-schedule')
+
+        # Add N-1 new schedules. Add last one after last schedule has been deleted.
+        while len(add_schedules) > 1:
+            options = add_schedules.pop()
+            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule')
+
+        # Delete last schedule no longer required.
+        while len(delete_schedules) > 0:
+            options = delete_schedules.pop()
+            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule')
+
+        # Add last new schedule.
+        while len(add_schedules) > 0:
+            options = add_schedules.pop()
+            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule')
+
+    def modify_snapshot_policy_schedule(self, options, zapi):
+        """
+        Add, modify or remove a schedule to/from a snapshot policy
+
+        :param options: dict of ZAPI children ('policy', 'schedule', ...)
+        :param zapi: name of the ZAPI call to invoke
+        """
+        snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+        try:
+            self.server.invoke_successfully(snapshot_obj, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying snapshot policy schedule %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def create_snapshot_policy(self):
+        """
+        Creates a new snapshot policy
+
+        Builds a single snapshot-policy-create call carrying up to 5
+        positional schedule/count/prefix/snapmirror-label attribute sets.
+        """
+        # set up required variables to create a snapshot policy
+        self.validate_parameters()
+        options = {'policy': self.parameters['name'],
+                   'enabled': str(self.parameters['enabled']),
+                   }
+
+        if 'snapmirror_label' in self.parameters:
+            snapmirror_labels = self.parameters['snapmirror_label']
+        else:
+            # User hasn't supplied any snapmirror labels.
+            snapmirror_labels = [None] * len(self.parameters['schedule'])
+
+        if 'prefix' in self.parameters:
+            prefixes = self.parameters['prefix']
+        else:
+            # User hasn't supplied any prefixes.
+            prefixes = [None] * len(self.parameters['schedule'])
+
+        # zapi attribute for first schedule is schedule1, second is schedule2 and so on
+        positions = [str(i) for i in range(1, len(self.parameters['schedule']) + 1)]
+        for schedule, prefix, count, snapmirror_label, position in \
+                zip(self.parameters['schedule'], prefixes,
+                    self.parameters['count'], snapmirror_labels, positions):
+            schedule = schedule.strip()
+            options['count' + position] = str(count)
+            options['schedule' + position] = schedule
+            if snapmirror_label is not None:
+                snapmirror_label = snapmirror_label.strip()
+                if snapmirror_label != '':
+                    options['snapmirror-label' + position] = snapmirror_label
+            if prefix is not None:
+                prefix = prefix.strip()
+                if prefix != '':
+                    options['prefix' + position] = prefix
+
+        snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-create', **options)
+
+        # Set up optional variables to create a snapshot policy
+        if self.parameters.get('comment'):
+            snapshot_obj.add_new_child("comment", self.parameters['comment'])
+        try:
+            self.server.invoke_successfully(snapshot_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating snapshot policy %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_snapshot_policy(self):
+        """
+        Deletes an existing snapshot policy
+        """
+        snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-delete")
+
+        # Set up required variables to delete a snapshot policy
+        snapshot_obj.add_new_child("policy", self.parameters['name'])
+        try:
+            self.server.invoke_successfully(snapshot_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting snapshot policy %s: %s' %
+                                  (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def asup_log_for_cserver(self, event_name):
+        """
+        Fetch admin vserver for the given cluster
+        Create and Autosupport log event with the given module name
+        :param event_name: Name of the event log
+        :return: None
+        """
+        # With a vserver-scoped connection the event can be logged directly;
+        # otherwise look up the cluster admin vserver and log through it.
+        if 'vserver' in self.parameters:
+            netapp_utils.ems_log_event(event_name, self.server)
+        else:
+            results = netapp_utils.get_cserver(self.server)
+            cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+            netapp_utils.ems_log_event(event_name, cserver)
+
+    def apply(self):
+        """
+        Check to see which play we should run
+
+        Determines create/delete/modify from the current policy state and
+        executes it (honoring check mode), then exits with the changed flag.
+        """
+        self.asup_log_for_cserver("na_ontap_snapshot_policy")
+        current = self.get_snapshot_policy()
+        modify = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            # Don't sort schedule/prefix/count/snapmirror_label lists as it can
+            # mess up the intended parameter order.
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_snapshot_policy()
+                elif cd_action == 'delete':
+                    self.delete_snapshot_policy()
+                if modify:
+                    # Apply attribute changes first, then reconcile schedules.
+                    self.modify_snapshot_policy(current)
+                    self.modify_snapshot_policy_schedules(current)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """
+    Creates and deletes a Snapshot Policy
+
+    Module entry point: instantiate the module object and run apply().
+    """
+    obj = NetAppOntapSnapshotPolicy()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py
new file mode 100644
index 00000000..ae49a721
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+"""
+create SNMP module to add/delete/modify SNMP user
+"""
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create/Delete SNMP community"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_snmp
+options:
+ access_control:
+ description:
+ - "Access control for the community. The only supported value is 'ro' (read-only)"
+ required: true
+ type: str
+ community_name:
+ description:
+ - "The name of the SNMP community to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified SNMP community should exist or not."
+ default: 'present'
+ type: str
+short_description: NetApp ONTAP SNMP community
+version_added: 2.6.0
+'''
+
+EXAMPLES = """
+ - name: Create SNMP community
+ na_ontap_snmp:
+ state: present
+ community_name: communityName
+ access_control: 'ro'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete SNMP community
+ na_ontap_snmp:
+ state: absent
+ community_name: communityName
+ access_control: 'ro'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPSnmp(object):
+    '''Class with SNMP methods, doesn't support check mode
+
+    Adds or deletes an SNMP community over ZAPI. Deliberately not
+    idempotent: a ZAPI failure (e.g. duplicate entry) is treated as
+    "no change" rather than an error.
+    '''
+
+    def __init__(self):
+
+        # Standard ONTAP connection options plus this module's own arguments.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            community_name=dict(required=True, type='str'),
+            access_control=dict(required=True, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=False
+        )
+
+        parameters = self.module.params
+        # set up state variables
+        self.state = parameters['state']
+        self.community_name = parameters['community_name']
+        self.access_control = parameters['access_control']
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def invoke_snmp_community(self, zapi):
+        """
+        Invoke zapi - add/delete take the same NaElement structure
+        @return: SUCCESS / FAILURE with an error_message
+        """
+        snmp_community = netapp_utils.zapi.NaElement.create_node_with_children(
+            zapi, **{'community': self.community_name,
+                     'access-control': self.access_control})
+        try:
+            self.server.invoke_successfully(snmp_community, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError:  # return False for duplicate entry
+            # NOTE(review): every NaApiError is swallowed here, not just
+            # duplicates — genuine failures are reported as "no change".
+            return False
+        return True
+
+    def add_snmp_community(self):
+        """
+        Adds a SNMP community
+        """
+        return self.invoke_snmp_community('snmp-community-add')
+
+    def delete_snmp_community(self):
+        """
+        Delete a SNMP community
+        """
+        return self.invoke_snmp_community('snmp-community-delete')
+
+    def apply(self):
+        """
+        Apply action to SNMP community
+        This module is not idempotent:
+        Add doesn't fail the playbook if user is trying
+        to add an already existing snmp community
+        """
+        changed = False
+        # Log an EMS event through the cluster admin vserver before changing anything.
+        results = netapp_utils.get_cserver(self.server)
+        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+        netapp_utils.ems_log_event("na_ontap_snmp", cserver)
+        if self.state == 'present':  # add
+            if self.add_snmp_community():
+                changed = True
+        elif self.state == 'absent':  # delete
+            if self.delete_snmp_community():
+                changed = True
+
+        self.module.exit_json(changed=changed)
+
+
+def main():
+    '''Execute action: instantiate the module object and run apply()'''
+    community_obj = NetAppONTAPSnmp()
+    community_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py
new file mode 100644
index 00000000..916bd5a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+"""
+create SNMP module to add/delete/modify SNMP user
+"""
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_snmp_traphosts
+short_description: NetApp ONTAP SNMP traphosts.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Whether the specified SNMP traphost should exist or not. Requires REST with 9.7 or higher
+options:
+ ip_address:
+ description:
+ - "The IP address of the SNMP traphost to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified SNMP traphost should exist or not."
+ default: 'present'
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create SNMP traphost
+ na_ontap_snmp:
+ state: present
+ ip_address: '10.10.10.10'
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ - name: Delete SNMP traphost
+ na_ontap_snmp:
+ state: absent
+ ip_address: '10.10.10.10'
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPSnmpTraphosts(object):
+    """Class with SNMP methods
+
+    Creates or deletes an SNMP traphost via the ONTAP REST API
+    (support/snmp/traphosts). REST-only; fails fast when REST is
+    not available on the target cluster.
+    """
+
+    def __init__(self):
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            ip_address=dict(required=True, type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.rest_api = OntapRestAPI(self.module)
+        # This module has no ZAPI fallback.
+        if not self.rest_api.is_rest():
+            self.module.fail_json(msg="na_ontap_snmp_traphosts only support Rest and ONTAP 9.6+")
+
+    def get_snmp_traphosts(self):
+        # Filter server-side on the requested traphost address.
+        params = {'ip_address': self.parameters['ip_address']}
+        api = 'support/snmp/traphosts'
+        message, error = self.rest_api.get(api, params)
+        if error:
+            self.module.fail_json(msg=error)
+        # NOTE(review): assumes 'records' is always present in the reply;
+        # a missing key would raise KeyError — confirm against OntapRestAPI.get().
+        if not message['records']:
+            return None
+        return message['records']
+
+    def create_snmp_traphost(self):
+        # NOTE(review): leading '/' here differs from the get path
+        # ('support/snmp/traphosts') — presumably OntapRestAPI normalizes
+        # both forms; verify.
+        api = '/support/snmp/traphosts'
+        params = {'host': self.parameters['ip_address']}
+        dummy, error = self.rest_api.post(api, params)
+        if error:
+            self.module.fail_json(msg=error)
+
+    def delete_snmp_traphost(self):
+        # The traphost address is the resource key in the REST path.
+        api = '/support/snmp/traphosts/' + self.parameters['ip_address']
+        dummy, error = self.rest_api.delete(api)
+        if error is not None:
+            self.module.fail_json(msg="Error deleting traphost: %s" % error)
+
+    def apply(self):
+        """
+        Apply action to SNMP traphost
+
+        Compares current vs desired state, performs the create/delete
+        (unless in check mode), and exits with the changed flag.
+        """
+        current = self.get_snmp_traphosts()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_snmp_traphost()
+                elif cd_action == 'delete':
+                    self.delete_snmp_traphost()
+
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    """Execute action: instantiate the module object and run apply()"""
+    community_obj = NetAppONTAPSnmpTraphosts()
+    community_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py
new file mode 100644
index 00000000..27d3ec48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_software_update
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Update ONTAP software
+ - Requires an https connection and is not supported over http
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_software_update
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified ONTAP package should update or not.
+ default: present
+ type: str
+ nodes:
+ description:
+ - List of nodes to be updated, the nodes have to be a part of a HA Pair.
+ aliases:
+ - node
+ type: list
+ elements: str
+ package_version:
+ required: true
+ description:
+ - Specifies the package version to update software.
+ type: str
+ package_url:
+ required: true
+ type: str
+ description:
+ - Specifies the package URL to download the package.
+ ignore_validation_warning:
+ description:
+ - Allows the update to continue if warnings are encountered during the validation phase.
+ default: False
+ type: bool
+ download_only:
+ description:
+ - Allows to download image without update.
+ default: False
+ type: bool
+ version_added: 20.4.0
+ stabilize_minutes:
+ description:
+ - Number of minutes that the update should wait after a takeover or giveback is completed.
+ type: int
+ version_added: 20.6.0
+ timeout:
+ description:
+ - how long to wait for the update to complete, in seconds.
+ default: 1800
+ type: int
+ force_update:
+ description:
+ - force an update, even if package_version matches what is reported as installed.
+ default: false
+ type: bool
+ version_added: 20.11.0
+short_description: NetApp ONTAP Update Software
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: ONTAP software update
+ na_ontap_software_update:
+ state: present
+ nodes: vsim1
+ package_url: "{{ url }}"
+ package_version: "{{ version_name }}"
+ ignore_validation_warning: True
+ download_only: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPSoftwareUpdate(object):
    """
    Class with ONTAP software update methods.

    Downloads a software package to the cluster, optionally runs the cluster
    image update, and polls ZAPI for download/update progress until completion
    or timeout.
    """

    def __init__(self):
        # Shared ONTAP connection options plus this module's own parameters.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            nodes=dict(required=False, type='list', elements='str', aliases=["node"]),
            package_version=dict(required=True, type='str'),
            package_url=dict(required=True, type='str'),
            ignore_validation_warning=dict(required=False, type='bool', default=False),
            download_only=dict(required=False, type='bool', default=False),
            stabilize_minutes=dict(required=False, type='int'),
            timeout=dict(required=False, type='int', default=1800),
            force_update=dict(required=False, type='bool', default=False),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # This module is ZAPI-only: fail early when the NetApp-Lib package is absent.
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    @staticmethod
    def cluster_image_get_iter():
        """
        Compose NaElement object to query current version
        :return: NaElement object for cluster-image-get-iter with query
        """
        cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        cluster_image_info = netapp_utils.zapi.NaElement('cluster-image-info')
        query.add_child_elem(cluster_image_info)
        cluster_image_get.add_child_elem(query)
        return cluster_image_get

    def cluster_image_get(self):
        """
        Get current cluster image info for every node.
        :return: list of (node-id, current-version) tuples; empty list when no
                 records are reported.  Fails the module on ZAPI error.
        """
        cluster_image_get_iter = self.cluster_image_get_iter()
        try:
            result = self.server.invoke_successfully(cluster_image_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster image details: %s: %s'
                                      % (self.parameters['package_version'], to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image details
        node_versions = list()
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 0:
            for image_info in result.get_child_by_name('attributes-list').get_children():
                node_versions.append((image_info.get_child_content('node-id'), image_info.get_child_content('current-version')))
        return node_versions

    def cluster_image_get_for_node(self, node_name):
        """
        Get current cluster image info for given node.
        :return: (node-id, current-version) tuple, or (None, None) when the
                 node reports no cluster-image-info.
        """
        cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get')
        cluster_image_get.add_new_child('node-id', node_name)
        try:
            result = self.server.invoke_successfully(cluster_image_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster image details for %s: %s'
                                      % (node_name, to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image version
        if result.get_child_by_name('attributes').get_child_by_name('cluster-image-info'):
            image_info = result.get_child_by_name('attributes').get_child_by_name('cluster-image-info')
            if image_info:
                return image_info.get_child_content('node-id'), image_info.get_child_content('current-version')
        return None, None

    @staticmethod
    def get_localname(tag):
        # Strip the XML namespace prefix from a tag name.
        return netapp_utils.zapi.etree.QName(tag).localname

    def cluster_image_update_progress_get(self, ignore_connection_error=True):
        """
        Get current cluster image update progress info.
        :param ignore_connection_error: when True, return an empty dict instead
               of failing if the ZAPI call errors (the node may be rebooting
               mid-update).
        :return: Dictionary of cluster image update progress if query successful, else return None
        """
        cluster_update_progress_get = netapp_utils.zapi.NaElement('cluster-image-update-progress-info')
        cluster_update_progress_info = dict()
        try:
            result = self.server.invoke_successfully(cluster_update_progress_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # return empty dict on error to satisfy package delete upon image update
            if ignore_connection_error:
                return cluster_update_progress_info
            self.module.fail_json(msg='Error fetching cluster image update progress details: %s' % (to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image update progress details
        if result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info'):
            update_progress_info = result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info')
            cluster_update_progress_info['overall_status'] = update_progress_info.get_child_content('overall-status')
            cluster_update_progress_info['completed_node_count'] = update_progress_info.\
                get_child_content('completed-node-count')
            reports = update_progress_info.get_child_by_name('validation-reports')
            if reports:
                cluster_update_progress_info['validation_reports'] = list()
                for report in reports.get_children():
                    checks = dict()
                    for check in report.get_children():
                        # key on the namespace-free element name
                        checks[self.get_localname(check.get_name())] = check.get_content()
                    cluster_update_progress_info['validation_reports'].append(checks)
        return cluster_update_progress_info

    def cluster_image_update(self):
        """
        Start the cluster image update.

        Fails the module on ZAPI error, attaching any validation reports that
        can be gathered from progress info or a fresh validation run.
        """
        cluster_update_info = netapp_utils.zapi.NaElement('cluster-image-update')
        cluster_update_info.add_new_child('package-version', self.parameters['package_version'])
        cluster_update_info.add_new_child('ignore-validation-warning',
                                          str(self.parameters['ignore_validation_warning']))
        if self.parameters.get('stabilize_minutes'):
            cluster_update_info.add_new_child('stabilize-minutes',
                                              self.na_helper.get_value_for_int(False, self.parameters['stabilize_minutes']))
        if self.parameters.get('nodes'):
            cluster_nodes = netapp_utils.zapi.NaElement('nodes')
            for node in self.parameters['nodes']:
                cluster_nodes.add_new_child('node-name', node)
            cluster_update_info.add_child_elem(cluster_nodes)
        try:
            self.server.invoke_successfully(cluster_update_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            msg = 'Error updating cluster image for %s: %s' % (self.parameters['package_version'], to_native(error))
            cluster_update_progress_info = self.cluster_image_update_progress_get(ignore_connection_error=True)
            validation_reports = str(cluster_update_progress_info.get('validation_reports'))
            # fall back to an explicit validation run when progress has no reports
            if validation_reports == "None":
                validation_reports = str(self.cluster_image_validate())
            self.module.fail_json(msg=msg, validation_reports=validation_reports, exception=traceback.format_exc())

    def cluster_image_package_download(self):
        """
        Start the cluster image package download.
        :return: True if package already exists, else return False
        """
        cluster_image_package_download_info = netapp_utils.zapi.NaElement('cluster-image-package-download')
        cluster_image_package_download_info.add_new_child('package-url', self.parameters['package_url'])
        try:
            self.server.invoke_successfully(cluster_image_package_download_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # Error 18408 denotes Package image with the same name already exists
            if to_native(error.code) == "18408":
                # TODO: if another package is using the same image name, we're stuck
                return True
            else:
                self.module.fail_json(msg='Error downloading cluster image package for %s: %s'
                                          % (self.parameters['package_url'], to_native(error)),
                                      exception=traceback.format_exc())
        return False

    def cluster_image_package_delete(self):
        """
        Delete the downloaded cluster image package.

        Called after a successful update; fails the module on ZAPI error.
        """
        cluster_image_package_delete_info = netapp_utils.zapi.NaElement('cluster-image-package-delete')
        cluster_image_package_delete_info.add_new_child('package-version', self.parameters['package_version'])
        try:
            self.server.invoke_successfully(cluster_image_package_delete_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cluster image package for %s: %s'
                                      % (self.parameters['package_version'], to_native(error)),
                                  exception=traceback.format_exc())

    def cluster_image_package_download_progress(self):
        """
        Get current cluster image package download progress
        :return: Dictionary of cluster image download progress if query successful, else return None
        """
        cluster_image_package_download_progress_info = netapp_utils.zapi.\
            NaElement('cluster-image-get-download-progress')
        try:
            result = self.server.invoke_successfully(
                cluster_image_package_download_progress_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster image package download progress for %s: %s'
                                      % (self.parameters['package_url'], to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image download progress details
        cluster_download_progress_info = dict()
        if result.get_child_by_name('progress-status'):
            cluster_download_progress_info['progress_status'] = result.get_child_content('progress-status')
            cluster_download_progress_info['progress_details'] = result.get_child_content('progress-details')
            cluster_download_progress_info['failure_reason'] = result.get_child_content('failure-reason')
            return cluster_download_progress_info
        return None

    def cluster_image_validate(self):
        """
        Validate that NDU is feasible.
        :return: List of dictionaries (one per validation check), or an error
                 message string if the ZAPI call itself fails.
        """
        cluster_image_validation_info = netapp_utils.zapi.NaElement('cluster-image-validate')
        cluster_image_validation_info.add_new_child('package-version', self.parameters['package_version'])
        try:
            result = self.server.invoke_successfully(
                cluster_image_validation_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # deliberately return (not raise) the message: callers embed it in
            # their own failure report
            msg = 'Error running cluster image validate: %s' % to_native(error)
            return msg
        # return cluster validation report
        cluster_report_info = list()
        if result.get_child_by_name('cluster-image-validation-report-list'):
            for report in result.get_child_by_name('cluster-image-validation-report-list').get_children():
                cluster_report_info.append(dict(
                    ndu_check=report.get_child_content('ndu-check'),
                    ndu_status=report.get_child_content('ndu-status'),
                    required_action=report.get_child_content('required-action')
                ))
        return cluster_report_info

    def autosupport_log(self):
        """
        Autosupport log for software_update
        :return:
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_software_update", cserver)

    def is_update_required(self):
        ''' return True if at least one node is not at the correct version '''
        if self.parameters.get('nodes'):
            versions = [self.cluster_image_get_for_node(node) for node in self.parameters['nodes']]
        else:
            versions = self.cluster_image_get()
        current_versions = set([x[1] for x in versions])
        if len(current_versions) != 1:
            # mixed set, need to update
            return True
        # only update if versions differ
        return current_versions.pop() != self.parameters['package_version']

    def apply(self):
        """
        Apply action to update ONTAP software.

        Downloads the package if needed, then (unless download_only) starts the
        update and polls progress until 'completed' or until timeout expires.
        """
        # TODO: cluster image update only works for HA configurations.
        # check if node image update can be used for other cases.
        if self.parameters.get('https') is not True:
            self.module.fail_json(msg='https parameter must be True')
        self.autosupport_log()
        # force_update bypasses the installed-version comparison
        changed = self.parameters['force_update'] or self.is_update_required()
        validation_reports = 'only available after update'
        if not self.module.check_mode and changed:
            if self.parameters.get('state') == 'present':
                package_exists = self.cluster_image_package_download()
                if package_exists is False:
                    # poll download progress every 5 seconds until it leaves the running phase
                    cluster_download_progress = self.cluster_image_package_download_progress()
                    while cluster_download_progress.get('progress_status') == 'async_pkg_get_phase_running':
                        time.sleep(5)
                        cluster_download_progress = self.cluster_image_package_download_progress()
                    if not cluster_download_progress.get('progress_status') == 'async_pkg_get_phase_complete':
                        self.module.fail_json(msg='Error downloading package: %s'
                                                  % (cluster_download_progress['failure_reason']))
                if self.parameters['download_only'] is False:
                    self.cluster_image_update()
                    # delete package once update is completed
                    cluster_update_progress = dict()
                    time_left = self.parameters['timeout']
                    polling_interval = 25
                    # assume in_progress if dict is empty
                    while time_left > 0 and cluster_update_progress.get('overall_status', 'in_progress') == 'in_progress':
                        time.sleep(polling_interval)
                        time_left -= polling_interval
                        cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=True)
                    if cluster_update_progress.get('overall_status') == 'completed':
                        validation_reports = str(cluster_update_progress.get('validation_reports'))
                        self.cluster_image_package_delete()
                    else:
                        # final check without tolerating connection errors: the
                        # update may have finished between the last two polls
                        cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=False)
                        if cluster_update_progress.get('overall_status') != 'completed':
                            if cluster_update_progress.get('overall_status') == 'in_progress':
                                msg = 'Timeout error'
                                action = ' Should the timeout value be increased? Current value is %d seconds.' % self.parameters['timeout']
                                action += ' The software update continues in background.'
                            else:
                                msg = 'Error'
                                action = ''
                            msg += ' updating image: overall_status: %s.' % (cluster_update_progress.get('overall_status', 'cannot get status'))
                            msg += action
                            validation_reports = str(cluster_update_progress.get('validation_reports'))
                            self.module.fail_json(msg=msg, validation_reports=validation_reports)

        self.module.exit_json(changed=changed, validation_reports=validation_reports)
+
+
def main():
    """Instantiate the module object and trigger the software update workflow."""
    update_module = NetAppONTAPSoftwareUpdate()
    update_module.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py
new file mode 100644
index 00000000..d45817f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Run cli commands on ONTAP over SSH using paramiko.
+ - Output is returned in C(stdout) and C(stderr), and also as C(stdout_lines), C(stdout_lines_filtered), C(stderr_lines).
+ - Note that the module can succeed even though the command failed. You need to analyze stdout and check the results.
+ - If the SSH host key is unknown and accepted, C(warnings) is updated.
+ - Options related to ZAPI or REST APIs are ignored.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_ssh_command
+short_description: NetApp ONTAP Run any cli command over plain SSH using paramiko.
+version_added: 20.8.0
+options:
+ command:
+ description:
+ - a string containing the command and arguments.
+ required: true
+ type: str
+ privilege:
+ description:
+ - privilege level at which to run the command, eg admin, advanced.
+ - if set, the command is prefixed with C(set -privilege <privilege>;).
+ type: str
+ accept_unknown_host_keys:
+ description:
+ - When false, reject the connection if the host key is not in known_hosts file.
+ - When true, if the host key is unknown, accept it, but report a warning.
+ - Note that the key is not added to the file. You could add the key by manually using SSH.
+ type: bool
+ default: false
+ include_lines:
+ description:
+ - return only lines containing string pattern in C(stdout_lines_filtered)
+ default: ''
+ type: str
+ exclude_lines:
+ description:
+      - do not return lines containing string pattern in C(stdout_lines_filtered)
+ default: ''
+ type: str
+ service_processor:
+ description:
+ - whether the target system is ONTAP or the service processor (SP)
+      - only meaningful when privilege is set
+ aliases: [sp]
+ default: false
+ type: bool
+'''
+
+EXAMPLES = """
+ - name: run ontap cli command using SSH
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: version
+
+ # Same as above, with parameters
+ - name: run ontap cli command
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: node show -fields node,health,uptime,model
+ privilege: admin
+
+ # Same as above, but with lines filtering
+ - name: run ontap cli command
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: node show -fields node,health,uptime,model
+ exclude_lines: 'ode ' # Exclude lines with 'Node ' or 'node'
+ # use with caution!
+ accept_unknown_host_keys: true
+ privilege: admin
+
+ - name: run ontap SSH command on SP
+ na_ontap_ssh_command:
+ # <<: *sp_login
+ command: sp switch-version
+ privilege: diag
+ sp: true
+ register: result
+ - debug: var=result
+"""
+
+RETURN = """
+stdout_lines_filtered:
+ description:
+ - In addition to stdout and stdout_lines, a list of non-white lines, excluding last and failed login information.
+ - The list can be further refined using the include_lines and exclude_lines filters.
+ returned: always
+ type: list
+"""
+
+import traceback
+import warnings
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+
class NetAppONTAPSSHCommand(object):
    ''' calls a CLI command using SSH

    Opens a paramiko SSH connection at construction time, runs the requested
    command, normalizes ONTAP's \r-heavy output, and returns raw plus filtered
    results via exit_json.
    '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            command=dict(required=True, type='str'),
            privilege=dict(required=False, type='str'),
            accept_unknown_host_keys=dict(required=False, type='bool', default=False),
            include_lines=dict(required=False, type='str', default=''),
            exclude_lines=dict(required=False, type='str', default=''),
            service_processor=dict(required=False, type='bool', default=False, aliases=['sp']),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        parameters = self.module.params
        # set up state variables
        self.command = parameters['command']
        self.privilege = parameters['privilege']
        self.include_lines = parameters['include_lines']
        self.exclude_lines = parameters['exclude_lines']
        self.accept_unknown_host_keys = parameters['accept_unknown_host_keys']
        self.service_processor = parameters['service_processor']
        self.warnings = list()
        self.failed = False

        if not HAS_PARAMIKO:
            self.module.fail_json(msg="the python paramiko module is required")

        client = paramiko.SSHClient()
        client.load_system_host_keys()      # load ~/.ssh/known_hosts if it exists
        if self.accept_unknown_host_keys:
            # accept unknown key, but raise a python warning
            client.set_missing_host_key_policy(paramiko.WarningPolicy())

        # capture the WarningPolicy warning (if any) and surface it to the user
        # via the module's warnings output instead of the python warning system
        with warnings.catch_warnings(record=True) as wngs:
            try:
                client.connect(hostname=parameters['hostname'], username=parameters['username'], password=parameters['password'])
                if len(wngs) > 0:
                    self.warnings.extend([str(warning.message) for warning in wngs])
            except paramiko.SSHException as exc:
                self.module.fail_json(msg="SSH connection failed: %s" % repr(exc))

        self.client = client

    def parse_output(self, out):
        """Read a paramiko channel file and normalize ONTAP line endings to \n.

        :param out: file-like object (paramiko ChannelFile) - fully consumed.
        :return: bytes with \r\r\n and \r\n collapsed to \n.
        """
        out_string = out.read()
        # ONTAP makes copious use of \r
        out_string = out_string.replace(b'\r\r\n', b'\n')
        out_string = out_string.replace(b'\r\n', b'\n')
        return(out_string)

    def run_ssh_command(self, command):
        ''' calls SSH and returns the (stdout, stderr) channel files;
        fails the module on SSHException '''
        try:
            stdin, stdout, stderr = self.client.exec_command(command)
        except paramiko.SSHException as exc:
            self.module.fail_json(msg='Error running command %s: %s' %
                                  (command, to_native(exc)),
                                  exception=traceback.format_exc())
        stdin.close()       # if we don't close, we may see a TypeError
        return stdout, stderr

    def filter_output(self, output):
        ''' Generate stdout_lines_filtered list
        Remove login information if found in the first non white lines

        Blank lines are dropped; include_lines/exclude_lines are plain
        substring filters.  On a decode failure, filtering stops and a
        truncation marker is appended.
        '''
        result = list()
        find_banner = True
        for line in output.splitlines():
            try:
                stripped_line = line.strip().decode()
            except Exception as exc:
                self.warnings.append("Unable to decode ONTAP output.  Skipping filtering.  Error: %s" % repr(exc))
                result.append('ERROR: truncated, cannot decode: %s' % line)
                # NOTE(review): failed is set to False here, so a decode issue
                # does not fail the task - confirm True was not intended.
                self.failed = False
                return result

            if not stripped_line:
                continue
            # skip leading login-banner lines until real output starts
            if find_banner and stripped_line.startswith(('Last login time:', 'Unsuccessful login attempts since last login:')):
                continue
            find_banner = False
            if self.exclude_lines:
                # '' is a substring of every string, so the include test always
                # passes when include_lines is left at its default
                if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
                    result.append(stripped_line)
            elif self.include_lines:
                if self.include_lines in stripped_line:
                    result.append(stripped_line)
            else:
                result.append(stripped_line)

        return result

    def run_command(self):
        ''' calls SSH, prefixing the command with a privilege-setting command
        when privilege is set (SP and ONTAP use different syntax)
        :return: (stdout bytes, filtered stdout lines, stderr bytes) '''
        # self.ems()
        command = self.command
        if self.privilege is not None:
            if self.service_processor:
                command = "priv set %s;%s" % (self.privilege, command)
            else:
                command = "set -privilege %s;%s" % (self.privilege, command)
        stdout, stderr = self.run_ssh_command(command)
        stdout_string = self.parse_output(stdout)
        stdout_filtered = self.filter_output(stdout_string)
        return stdout_string, stdout_filtered, self.parse_output(stderr)

    def apply(self):
        ''' calls the command and returns raw output

        changed is always reported True; failed is set when anything was
        written to stderr.  In check mode the command is not executed.
        '''
        changed = True
        stdout, filtered, stderr = '', '', ''
        if not self.module.check_mode:
            stdout, filtered, stderr = self.run_command()
            if stderr:
                self.failed = True
        self.module.exit_json(changed=changed, failed=self.failed, stdout=stdout, stdout_lines_filtered=filtered, stderr=stderr, warnings=self.warnings)
+
+
def main():
    """
    Execute action from playbook
    """
    ssh_module = NetAppONTAPSSHCommand()
    ssh_module.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py
new file mode 100644
index 00000000..c53dee2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+
+# (c) 2018-2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_svm
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_svm
+
+short_description: NetApp ONTAP SVM
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, modify or delete SVM on NetApp ONTAP
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the SVM to be renamed
+ type: str
+ version_added: 2.7.0
+
+ root_volume:
+ description:
+ - Root volume of the SVM.
+ - Cannot be modified after creation.
+ type: str
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Cannot be modified after creation.
+ type: str
+
+ root_volume_security_style:
+ description:
+ - Security Style of the root volume.
+ - When specified as part of the vserver-create,
+ this field represents the security style for the Vserver root volume.
+ - When specified as part of vserver-get-iter call,
+ this will return the list of matching Vservers.
+ - The 'unified' security style, which applies only to Infinite Volumes,
+ cannot be applied to a Vserver's root volume.
+ - Cannot be modified after creation.
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+ type: str
+
+ allowed_protocols:
+ description:
+ - Allowed Protocols.
+ - When specified as part of a vserver-create,
+ this field represent the list of protocols allowed on the Vserver.
+ - When part of vserver-get-iter call,
+ this will return the list of Vservers
+ which have any of the protocols specified
+ as part of the allowed-protocols.
+ - When part of vserver-modify,
+ this field should include the existing list
+ along with new protocol list to be added to prevent data disruptions.
+ - Possible values
+ - nfs NFS protocol,
+ - cifs CIFS protocol,
+ - fcp FCP protocol,
+ - iscsi iSCSI protocol,
+ - ndmp NDMP protocol,
+ - http HTTP protocol,
+ - nvme NVMe protocol
+ type: list
+ elements: str
+
+ aggr_list:
+ description:
+ - List of aggregates assigned for volume operations.
+ - These aggregates could be shared for use with other Vservers.
+ - When specified as part of a vserver-create,
+ this field represents the list of aggregates
+ that are assigned to the Vserver for volume operations.
+ - When part of vserver-get-iter call,
+ this will return the list of Vservers
+ which have any of the aggregates specified as part of the aggr list.
+ type: list
+ elements: str
+
+ ipspace:
+ description:
+ - IPSpace name
+ - Cannot be modified after creation.
+ type: str
+ version_added: 2.7.0
+
+
+ snapshot_policy:
+ description:
+ - Default snapshot policy setting for all volumes of the Vserver.
+ This policy will be assigned to all volumes created in this
+ Vserver unless the volume create request explicitly provides a
+ snapshot policy or volume is modified later with a specific
+ snapshot policy. A volume-level snapshot policy always overrides
+ the default Vserver-wide snapshot policy.
+ version_added: 2.7.0
+ type: str
+
+ language:
+ description:
+ - Language to use for the SVM
+ - Default to C.UTF-8
+ - Possible values Language
+ - c POSIX
+ - ar Arabic
+ - cs Czech
+ - da Danish
+ - de German
+ - en English
+ - en_us English (US)
+ - es Spanish
+ - fi Finnish
+ - fr French
+ - he Hebrew
+ - hr Croatian
+ - hu Hungarian
+ - it Italian
+ - ja Japanese euc-j
+ - ja_v1 Japanese euc-j
+ - ja_jp.pck Japanese PCK (sjis)
+ - ja_jp.932 Japanese cp932
+ - ja_jp.pck_v2 Japanese PCK (sjis)
+ - ko Korean
+ - no Norwegian
+ - nl Dutch
+ - pl Polish
+ - pt Portuguese
+ - ro Romanian
+ - ru Russian
+ - sk Slovak
+ - sl Slovenian
+ - sv Swedish
+ - tr Turkish
+ - zh Simplified Chinese
+ - zh.gbk Simplified Chinese (GBK)
+ - zh_tw Traditional Chinese euc-tw
+ - zh_tw.big5 Traditional Chinese Big 5
+ - utf8mb4
+ - Most of the values accept a .utf_8 suffix, e.g. fr.utf_8
+ type: str
+ version_added: 2.7.0
+
+ subtype:
+ description:
+ - The subtype for vserver to be created.
+ - Cannot be modified after creation.
+ choices: ['default', 'dp_destination', 'sync_source', 'sync_destination']
+ type: str
+ version_added: 2.7.0
+
+ comment:
+ description:
+ - When specified as part of a vserver-create, this field represents the comment associated with the Vserver.
+ - When part of vserver-get-iter call, this will return the list of matching Vservers.
+ type: str
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ na_ontap_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.zapis_svm as zapis
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSVM(object):
+ ''' create, delete, modify, rename SVM (aka vserver) '''
+
    def __init__(self):
        # Build argument spec, select REST vs ZAPI transport, and set up the
        # corresponding connection object.
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            root_volume=dict(type='str'),
            root_volume_aggregate=dict(type='str'),
            root_volume_security_style=dict(type='str', choices=['unix',
                                                                 'ntfs',
                                                                 'mixed',
                                                                 'unified'
                                                                 ]),
            allowed_protocols=dict(type='list', elements='str'),
            aggr_list=dict(type='list', elements='str'),
            ipspace=dict(type='str', required=False),
            snapshot_policy=dict(type='str', required=False),
            language=dict(type='str', required=False),
            subtype=dict(type='str', choices=['default', 'dp_destination', 'sync_source', 'sync_destination']),
            comment=dict(type="str", required=False)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Ontap documentation uses C.UTF-8, but actually stores as c.utf_8.
        if 'language' in self.parameters and self.parameters['language'].lower() == 'c.utf-8':
            self.parameters['language'] = 'c.utf_8'

        self.rest_api = OntapRestAPI(self.module)
        # with REST, to force synchronous operations
        self.timeout = self.rest_api.timeout
        # root volume not supported with rest api
        unsupported_rest_properties = ['root_volume', 'root_volume_aggregate', 'root_volume_security_style']
        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
        # is_rest() decides the transport; it rejects REST when any of the
        # unsupported properties above were supplied by the user
        self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
        if error is not None:
            self.module.fail_json(msg=error)
        if not self.use_rest:
            # ZAPI fallback requires the NetApp-Lib python package
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(
                    msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ @staticmethod
+ def clean_up_output(vserver_details):
+ vserver_details['root_volume'] = None
+ vserver_details['root_volume_aggregate'] = None
+ vserver_details['root_volume_security_style'] = None
+ vserver_details['aggr_list'] = []
+ for aggr in vserver_details['aggregates']:
+ vserver_details['aggr_list'].append(aggr['name'])
+ vserver_details.pop('aggregates')
+ vserver_details['ipspace'] = vserver_details['ipspace']['name']
+ vserver_details['snapshot_policy'] = vserver_details['snapshot_policy']['name']
+ vserver_details['allowed_protocols'] = []
+ if 'cifs' in vserver_details:
+ if vserver_details['cifs']['enabled']:
+ vserver_details['allowed_protocols'].append('cifs')
+ vserver_details.pop('cifs')
+ if 'fcp' in vserver_details:
+ if vserver_details['fcp']['enabled']:
+ vserver_details['allowed_protocols'].append('fcp')
+ vserver_details.pop('fcp')
+ if 'issi' in vserver_details:
+ if vserver_details['iscsi']['enabled']:
+ vserver_details['allowed_protocols'].append('iscsi')
+ vserver_details.pop('iscsi')
+ if 'nvme' in vserver_details:
+ if vserver_details['nvme']['enabled']:
+ vserver_details['allowed_protocols'].append('nvme')
+ vserver_details.pop('nvme')
+ if 'nfs' in vserver_details:
+ if vserver_details['nfs']['enabled']:
+ vserver_details['allowed_protocols'].append('nfs')
+ vserver_details.pop('nfs')
+ return vserver_details
+
+ def get_vserver(self, vserver_name=None):
+ """
+ Checks if vserver exists.
+
+ :return:
+ vserver object if vserver found
+ None if vserver is not found
+ :rtype: object/None
+ """
+ if vserver_name is None:
+ vserver_name = self.parameters['name']
+
+ if self.use_rest:
+ api = 'svm/svms'
+ params = {'fields': 'subtype,aggregates,language,snapshot_policy,ipspace,comment,nfs,cifs,fcp,iscsi,nvme'}
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ return None
+ elif 'records' in message and len(message['records']) == 0:
+ return None
+ elif 'records' not in message:
+ error = "Unexpected response in get_net_route from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ vserver_details = None
+ for record in message['records']:
+ if record['name'] == vserver_name:
+ vserver_details = copy.deepcopy(record)
+ break
+ if vserver_details is None:
+ return None
+ return self.clean_up_output(vserver_details)
+
+ else:
+ return zapis.get_vserver(self.server, vserver_name)
+
+ def create_vserver(self):
+ if self.use_rest:
+ api = 'svm/svms'
+ params = {'name': self.parameters['name']}
+ if self.parameters.get('language'):
+ params['language'] = self.parameters['language']
+ if self.parameters.get('ipspace'):
+ params['ipspace'] = self.parameters['ipspace']
+ if self.parameters.get('snapshot_policy'):
+ params['snapshot_policy'] = self.parameters['snapshot_policy']
+ if self.parameters.get('subtype'):
+ params['subtype'] = self.parameters['subtype']
+ if self.parameters.get('comment'):
+ params['comment'] = self.parameters['comment']
+ if self.parameters.get('aggr_list'):
+ params['aggregates'] = []
+ for aggr in self.parameters['aggr_list']:
+ params['aggregates'].append({'name': aggr})
+ if self.parameters.get('allowed_protocols'):
+ for protocol in self.parameters['allowed_protocols']:
+ params[protocol] = {'enabled': 'true'}
+ # for a sync operation
+ data = {'return_timeout': self.timeout}
+ __, error = self.rest_api.post(api, params, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ options = {'vserver-name': self.parameters['name']}
+ self.add_parameter_to_dict(options, 'root_volume', 'root-volume')
+ self.add_parameter_to_dict(options, 'root_volume_aggregate', 'root-volume-aggregate')
+ self.add_parameter_to_dict(options, 'root_volume_security_style', 'root-volume-security-style')
+ self.add_parameter_to_dict(options, 'language', 'language')
+ self.add_parameter_to_dict(options, 'ipspace', 'ipspace')
+ self.add_parameter_to_dict(options, 'snapshot_policy', 'snapshot-policy')
+ self.add_parameter_to_dict(options, 'subtype', 'vserver-subtype')
+ self.add_parameter_to_dict(options, 'comment', 'comment')
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children('vserver-create', **options)
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error provisioning SVM %s: %s'
+ % (self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+ # add allowed-protocols, aggr-list after creation,
+ # since vserver-create doesn't allow these attributes during creation
+ options = dict()
+ for key in ('allowed_protocols', 'aggr_list'):
+ if self.parameters.get(key):
+ options[key] = self.parameters[key]
+ if options:
+ self.modify_vserver(options)
+
+ def delete_vserver(self, current=None):
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in delete')
+ api = 'svm/svms/%s' % current['uuid']
+ # for a sync operation
+ query = {'return_timeout': self.timeout}
+ __, error = self.rest_api.delete(api, params=query)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error deleting SVM %s: %s'
+ % (self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self, current=None):
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in rename')
+ api = 'svm/svms/%s' % current['uuid']
+ params = {'name': self.parameters['name']}
+ # for a sync operation
+ data = {'return_timeout': self.timeout}
+ __, error = self.rest_api.patch(api, params, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.parameters['from_name'],
+ 'new-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error renaming SVM %s: %s'
+ % (self.parameters['from_name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def modify_vserver(self, modify, current=None):
+ '''
+ Modify vserver.
+ :param modify: list of modify attributes
+ :param current: with rest, SVM object to modify
+ '''
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in modify')
+ api = 'svm/svms/%s' % current['uuid']
+ for attribute in modify:
+ if attribute == 'snapshot_policy' or attribute == 'allowed_protocols' or attribute == 'aggr_list':
+ self.module.fail_json(msg='REST API does not support modify of %s' % attribute)
+ # for a sync operation
+ data = {'return_timeout': self.timeout}
+ __, error = self.rest_api.patch(api, modify, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ zapis.modify_vserver(self.server, self.module, self.parameters['name'], modify, self.parameters)
+
+ def add_parameter_to_dict(self, adict, name, key=None, tostr=False):
+ '''
+ add defined parameter (not None) to adict using key.
+ :param adict: a dictionary.
+ :param name: name in self.parameters.
+ :param key: key in adict.
+ :param tostr: boolean.
+ '''
+ if key is None:
+ key = name
+ if self.parameters.get(name) is not None:
+ if tostr:
+ adict[key] = str(self.parameters.get(name))
+ else:
+ adict[key] = self.parameters.get(name)
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ if not self.use_rest:
+ self.asup_log_for_cserver("na_ontap_svm")
+ current = self.get_vserver()
+ cd_action, rename = None, None
+ if self.parameters.get('from_name'):
+ old_svm = self.get_vserver(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(old_svm, current)
+ if rename is None:
+ self.module.fail_json(msg='Error renaming SVM %s: no SVM with from_name %s.' % (self.parameters['name'], self.parameters['from_name']))
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ for attribute in modify:
+ if attribute in ['root_volume', 'root_volume_aggregate', 'root_volume_security_style', 'subtype', 'ipspace']:
+ self.module.fail_json(msg='Error modifying SVM %s: can not modify %s.' % (self.parameters['name'], attribute))
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ self.rename_vserver(old_svm)
+ # If rename is True, cd_action is None, but modify could be true or false.
+ if cd_action == 'create':
+ self.create_vserver()
+ elif cd_action == 'delete':
+ self.delete_vserver(current)
+ elif modify:
+ self.modify_vserver(modify, current)
+
+ results = dict(changed=self.na_helper.changed)
+ if modify:
+ if netapp_utils.has_feature(self.module, 'show_modified'):
+ results['modify'] = str(modify)
+ if 'aggr_list' in modify:
+ if '*' in modify['aggr_list']:
+ results['warnings'] = "Changed always 'True' when aggr_list is '*'."
+ self.module.exit_json(**results)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
def main():
    '''Apply vserver operations from playbook'''
    NetAppOntapSVM().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py
new file mode 100644
index 00000000..3480a827
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP Modify SVM Options
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modify ONTAP SVM Options
+ - Only Options that appear on "vserver options show" can be set
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_svm_options
+version_added: 2.7.0
+options:
+ name:
+ description:
+ - Name of the option.
+ type: str
+ value:
+ description:
+ - Value of the option.
      - Value must be quoted.
+ type: str
+ vserver:
+ description:
+ - The name of the vserver to which this option belongs to.
+ required: True
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Set SVM Options
+ na_ontap_svm_options:
+ vserver: "{{ netapp_vserver_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ name: snmp.enable
+ value: 'on'
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPSvnOptions(object):
    """
    Set a vserver option to the requested value if it is not already set.

    NOTE(review): the class name carries a historical 'Svn' (vs 'Svm') typo;
    it is kept because main() instantiates the class by this name.
    """

    def __init__(self):
        """Declare the module arguments and open a vserver-scoped ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=False, type="str", default=None),
            value=dict(required=False, type='str', default=None),
            vserver=dict(required=True, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def set_options(self):
        """
        Set option 'name' to 'value' on the vserver.
        :return: None
        """
        # Fail with a clear message instead of an obscure ZAPI error when the
        # option name or value was not supplied (both arguments are optional).
        if self.parameters.get('name') is None or self.parameters.get('value') is None:
            self.module.fail_json(msg="Error setting options: 'name' and 'value' are both required")
        option_obj = netapp_utils.zapi.NaElement("options-set")
        option_obj.add_new_child('name', self.parameters['name'])
        option_obj.add_new_child('value', self.parameters['value'])
        try:
            self.server.invoke_successfully(option_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc())

    def list_options(self):
        """
        List all options on the vserver.
        :return: the raw 'options-list-info' ZAPI result (previously discarded).
        """
        option_obj = netapp_utils.zapi.NaElement("options-list-info")
        try:
            return self.server.invoke_successfully(option_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc())

    def is_option_set(self):
        """
        Check whether a matching option record already exists.
        :return: True if at least one record matches name/value/vserver, else False.
        """
        option_obj = netapp_utils.zapi.NaElement("options-get-iter")
        options_info = netapp_utils.zapi.NaElement("option-info")
        # Only add query children for parameters the user actually supplied.
        if self.parameters.get('name') is not None:
            options_info.add_new_child("name", self.parameters['name'])
        if self.parameters.get('value') is not None:
            options_info.add_new_child("value", self.parameters['value'])
        if self.parameters.get('vserver') is not None:
            options_info.add_new_child("vserver", self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement("query")
        query.add_child_elem(options_info)
        option_obj.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(option_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc())

        return bool(result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1)

    def apply(self):
        """Set the option when it differs from the requested value; honor check mode."""
        changed = False
        netapp_utils.ems_log_event("na_ontap_svm_options", self.server)
        if not self.is_option_set():
            changed = True
            if not self.module.check_mode:
                self.set_options()
        self.module.exit_json(changed=changed)
+
+
def main():
    """
    Execute action from playbook
    :return: none
    """
    options_module = NetAppONTAPSvnOptions()
    options_module.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
new file mode 100644
index 00000000..5c5f847b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+---
+
+module: na_ontap_ucadapter
+short_description: NetApp ONTAP UC adapter configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - modify the UC adapter mode and type taking pending type and mode into account.
+
+options:
+ state:
+ description:
+ - Whether the specified adapter should exist.
+ required: false
+ choices: ['present']
+ default: 'present'
+ type: str
+
+ adapter_name:
+ description:
+ - Specifies the adapter name.
+ required: true
+ type: str
+
+ node_name:
+ description:
+ - Specifies the adapter home node.
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Specifies the mode of the adapter.
+ type: str
+
+ type:
+ description:
+ - Specifies the fc4 type of the adapter.
+ type: str
+
+ pair_adapters:
+ description:
+ - Specifies the list of adapters which also need to be offline along with the current adapter during modifying.
+ - If specified adapter works in a group or pair, the other adapters might also need to offline before modify the specified adapter.
+ - The mode of pair_adapters are modified along with the adapter, the type of the pair_adapters are not modified.
+ type: list
+ elements: str
+ version_added: '20.6.0'
+
+'''
+
+EXAMPLES = '''
+ - name: Modify adapter
    na_ontap_ucadapter:
+ state: present
+ adapter_name: 0e
+ pair_adapters: 0f
+ node_name: laurentn-vsim1
+ mode: fc
+ type: target
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapadapter(object):
    ''' object to describe adapter info '''

    def __init__(self):
        """Declare the module arguments and open a cluster-scoped ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present'], default='present', type='str'),
            adapter_name=dict(required=True, type='str'),
            node_name=dict(required=True, type='str'),
            mode=dict(required=False, type='str'),
            type=dict(required=False, type='str'),
            pair_adapters=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_adapter(self):
        """
        Return details about the adapter.

        :return: dict with current/pending mode and fc4 type plus status,
                 or None when ZAPI returns no attributes for the adapter.
        :rtype: dict or None
        """
        adapter_info = netapp_utils.zapi.NaElement('ucm-adapter-get')
        adapter_info.add_new_child('adapter-name', self.parameters['adapter_name'])
        adapter_info.add_new_child('node-name', self.parameters['node_name'])
        try:
            result = self.server.invoke_successfully(adapter_info, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching ucadapter details: %s: %s'
                                      % (self.parameters['node_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('attributes'):
            adapter_attributes = result.get_child_by_name('attributes').\
                get_child_by_name('uc-adapter-info')
            return {
                'mode': adapter_attributes.get_child_content('mode'),
                'pending-mode': adapter_attributes.get_child_content('pending-mode'),
                'type': adapter_attributes.get_child_content('fc4-type'),
                'pending-type': adapter_attributes.get_child_content('pending-fc4-type'),
                'status': adapter_attributes.get_child_content('status'),
            }
        return None

    def modify_adapter(self):
        """
        Modify the adapter's mode and/or fc4 type.
        """
        params = {'adapter-name': self.parameters['adapter_name'],
                  'node-name': self.parameters['node_name']}
        if self.parameters.get('type') is not None:
            params['fc4-type'] = self.parameters['type']
        if self.parameters.get('mode') is not None:
            params['mode'] = self.parameters['mode']
        adapter_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'ucm-adapter-modify', ** params)
        try:
            self.server.invoke_successfully(adapter_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying adapter %s: %s' % (self.parameters['adapter_name'], to_native(e)),
                                  exception=traceback.format_exc())

    def online_or_offline_adapter(self, status, adapter_name):
        """
        Bring a Fibre Channel target adapter offline ('down') or online ('up').

        :param status: 'down' or 'up'.
        :param adapter_name: adapter to change.
        """
        zapi_by_status = {'down': 'fcp-adapter-config-down', 'up': 'fcp-adapter-config-up'}
        # BUGFIX: the original code left 'adapter' unbound for any other status,
        # raising UnboundLocalError instead of a clear module failure.
        if status not in zapi_by_status:
            self.module.fail_json(msg='Internal error: unexpected adapter status %s' % status)
        adapter = netapp_utils.zapi.NaElement(zapi_by_status[status])
        adapter.add_new_child('fcp-adapter', adapter_name)
        adapter.add_new_child('node', self.parameters['node_name'])
        try:
            self.server.invoke_successfully(adapter,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error trying to %s fc-adapter %s: %s' % (status, adapter_name, to_native(e)),
                                  exception=traceback.format_exc())

    def autosupport_log(self):
        """
        Autosupport log for ucadapter
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_ucadapter", cserver)

    def apply(self):
        ''' calling all adapter features '''
        changed = False
        adapter_detail = self.get_adapter()

        def need_to_change(expected, pending, current):
            # A pending value (takes effect on reboot) has precedence over the
            # active value when deciding whether a modification is required.
            if expected is None:
                return False
            elif pending is not None:
                return pending != expected
            elif current is not None:
                return current != expected
            return False

        if adapter_detail:
            if self.parameters.get('type') is not None:
                changed = need_to_change(self.parameters['type'], adapter_detail['pending-type'], adapter_detail['type'])
            changed = changed or need_to_change(self.parameters.get('mode'), adapter_detail['pending-mode'], adapter_detail['mode'])

        if changed and not self.module.check_mode:
            # The adapter (and any paired adapters) must be offline during the
            # modification, then brought back online afterwards.
            self.online_or_offline_adapter('down', self.parameters['adapter_name'])
            for adapter in self.parameters.get('pair_adapters') or []:
                self.online_or_offline_adapter('down', adapter)
            self.modify_adapter()
            self.online_or_offline_adapter('up', self.parameters['adapter_name'])
            for adapter in self.parameters.get('pair_adapters') or []:
                self.online_or_offline_adapter('up', adapter)

        self.module.exit_json(changed=changed)
+
+
def main():
    '''Entry point: apply the requested ucadapter configuration.'''
    NetAppOntapadapter().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
new file mode 100644
index 00000000..dd589074
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+"""
+create Autosupport module to enable, disable or modify
+"""
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create/Delete Unix user group"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_unix_group
+options:
+ state:
+ description:
+ - Whether the specified group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Specifies UNIX group's name, unique for each group.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ id:
+ description:
+ - Specifies an identification number for the UNIX group.
+ - Group ID is unique for each UNIX group.
+ - Required for create, modifiable.
+ type: int
+
+ vserver:
+ description:
+ - Specifies the Vserver for the UNIX group.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ skip_name_validation:
+ description:
+ - Specifies if group name validation is skipped.
+ type: bool
+
+ users:
+ description:
+ - Specifies the users associated with this group. Should be comma separated.
+ - It represents the expected state of a list of users at any time.
+ - Add a user into group if it is specified in expected state but not in current state.
+ - Delete a user from group if it is specified in current state but not in expected state.
+ - To delete all current users, use '' as value.
+ type: list
+ elements: str
+ version_added: 2.9.0
+
+short_description: NetApp ONTAP UNIX Group
+version_added: 2.8.0
+
+"""
+
+EXAMPLES = """
+ - name: Create UNIX group
+ na_ontap_unix_group:
+ state: present
+ name: SampleGroup
+ vserver: ansibleVServer
+ id: 2
+ users: user1,user2
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete all users in UNIX group
+ na_ontap_unix_group:
+ state: present
+ name: SampleGroup
+ vserver: ansibleVServer
+ users: ''
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete UNIX group
+ na_ontap_unix_group:
+ state: absent
+ name: SampleGroup
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapUnixGroup(object):
    """
    Common operations to manage UNIX groups
    """

    def __init__(self):
        # Standard ONTAP connection arguments (hostname, username, password, ...).
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            id=dict(required=False, type='int'),
            skip_name_validation=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
            users=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Populate the playbook-key -> ZAPI-key maps used by get/create/modify.
        self.set_playbook_zapi_key_map()

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # All ZAPI calls are scoped to the target vserver.
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+ self.na_helper.zapi_string_keys = {
+ 'name': 'group-name'
+ }
+ self.na_helper.zapi_int_keys = {
+ 'id': 'group-id'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'skip_name_validation': 'skip-name-validation'
+ }
+
    def get_unix_group(self):
        """
        Checks if the UNIX group exists.

        :return:
            dict() if group found
            None if group is not found
        """

        get_unix_group = netapp_utils.zapi.NaElement('name-mapping-unix-group-get-iter')
        # Query by group name within the target vserver only.
        attributes = {
            'query': {
                'unix-group-info': {
                    'group-name': self.parameters['name'],
                    'vserver': self.parameters['vserver'],
                }
            }
        }
        get_unix_group.translate_struct(attributes)
        try:
            result = self.server.invoke_successfully(get_unix_group, enable_tunneling=True)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                group_info = result['attributes-list']['unix-group-info']
                group_details = dict()
            else:
                return None
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # Map ZAPI fields back onto playbook keys: strings as-is, ints converted.
        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
            group_details[item_key] = group_info[zapi_key]
        for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
            group_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                       value=group_info[zapi_key])
        # 'users' is a list of child elements; report None when the group has none.
        if group_info.get_child_by_name('users') is not None:
            group_details['users'] = [user.get_child_content('user-name')
                                      for user in group_info.get_child_by_name('users').get_children()]
        else:
            group_details['users'] = None
        return group_details
+
+ def create_unix_group(self):
+ """
+ Creates an UNIX group in the specified Vserver
+
+ :return: None
+ """
+ if self.parameters.get('id') is None:
+ self.module.fail_json(msg='Error: Missing a required parameter for create: (id)')
+
+ group_create = netapp_utils.zapi.NaElement('name-mapping-unix-group-create')
+ group_details = {}
+ for item in self.parameters:
+ if item in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(item)
+ group_details[zapi_key] = self.parameters[item]
+ elif item in self.na_helper.zapi_bool_keys:
+ zapi_key = self.na_helper.zapi_bool_keys.get(item)
+ group_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+ value=self.parameters[item])
+ elif item in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(item)
+ group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=self.parameters[item])
+ group_create.translate_struct(group_details)
+ try:
+ self.server.invoke_successfully(group_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if self.parameters.get('users') is not None:
+ self.modify_users_in_group()
+
+ def delete_unix_group(self):
+ """
+ Deletes an UNIX group from a vserver
+
+ :return: None
+ """
+ group_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-group-destroy', **{'group-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(group_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_unix_group(self, params):
+ """
+ Modify an UNIX group from a vserver
+ :param params: modify parameters
+ :return: None
+ """
+ # modify users requires separate zapi.
+ if 'users' in params:
+ self.modify_users_in_group()
+ if len(params) == 1:
+ return
+
+ group_modify = netapp_utils.zapi.NaElement('name-mapping-unix-group-modify')
+ group_details = {'group-name': self.parameters['name']}
+ for key in params:
+ if key in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(key)
+ group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=params[key])
+ group_modify.translate_struct(group_details)
+
+ try:
+ self.server.invoke_successfully(group_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_users_in_group(self):
+ """
+ Add/delete one or many users in a UNIX group
+
+ :return: None
+ """
+ current_users = self.get_unix_group().get('users')
+ expect_users = self.parameters.get('users')
+
+ if current_users is None:
+ current_users = []
+ if expect_users[0] == '' and len(expect_users) == 1:
+ expect_users = []
+
+ users_to_remove = list(set(current_users) - set(expect_users))
+ users_to_add = list(set(expect_users) - set(current_users))
+
+ if len(users_to_add) > 0:
+ for user in users_to_add:
+ add_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-add-user')
+ group_details = {'group-name': self.parameters['name'], 'user-name': user}
+ add_user.translate_struct(group_details)
+ try:
+ self.server.invoke_successfully(add_user, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error adding user %s to UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ if len(users_to_remove) > 0:
+ for user in users_to_remove:
+ delete_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-delete-user')
+ group_details = {'group-name': self.parameters['name'], 'user-name': user}
+ delete_user.translate_struct(group_details)
+ try:
+ self.server.invoke_successfully(delete_user, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error deleting user %s from UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        """
+        Send an EMS (autosupport) log event recording that this module ran.
+
+        :return: None
+        """
+        netapp_utils.ems_log_event("na_ontap_unix_group", self.server)
+
+    def apply(self):
+        """
+        Invoke appropriate action based on playbook parameters.
+
+        Decides between create, delete and modify, and honors check mode
+        (no changes are applied when check_mode is set).
+
+        :return: None
+        """
+        self.autosupport_log()
+        current = self.get_unix_group()
+        # cd_action is 'create', 'delete' or None (group exists and should exist)
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.parameters['state'] == 'present' and cd_action is None:
+            # group exists: compute which attributes differ from the playbook
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                # check mode: report 'changed' without touching ONTAP
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_unix_group()
+                elif cd_action == 'delete':
+                    self.delete_unix_group()
+                else:
+                    # only reachable when 'modify' was assigned above
+                    self.modify_unix_group(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapUnixGroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py
new file mode 100644
index 00000000..40f05425
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_unix_user
+
+short_description: NetApp ONTAP UNIX users
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete or modify UNIX users local to ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Specifies user's UNIX account name.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ group_id:
+ description:
+ - Specifies the primary group identification number for the UNIX user
+ - Required for create, modifiable.
+ type: int
+
+ vserver:
+ description:
+ - Specifies the Vserver for the UNIX user.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ id:
+ description:
+ - Specifies an identification number for the UNIX user.
+ - Required for create, modifiable.
+ type: int
+
+ full_name:
+ description:
+ - Specifies the full name of the UNIX user
+ - Optional for create, modifiable.
+ type: str
+'''
+
+EXAMPLES = """
+
+ - name: Create UNIX User
+ na_ontap_unix_user:
+ state: present
+ name: SampleUser
+ vserver: ansibleVServer
+ group_id: 1
+ id: 2
+ full_name: Test User
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete UNIX User
+ na_ontap_unix_user:
+ state: absent
+ name: SampleUser
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUnixUser(object):
+    """
+    Create, modify or delete a local UNIX user on an ONTAP Vserver (ZAPI only).
+    """
+
+    def __init__(self):
+        # Build the module argument spec on top of the common ONTAP connection options.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+            group_id=dict(required=False, type='int'),
+            id=dict(required=False, type='int'),
+            full_name=dict(required=False, type='str'),
+            vserver=dict(required=True, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            # ZAPI connection tunneled to the target vserver
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_unix_user(self):
+        """
+        Check if the UNIX user exists.
+
+        :return:
+            dict with 'group_id', 'id' and 'full_name' if the user is found
+            None if the user is not found
+        """
+
+        get_unix_user = netapp_utils.zapi.NaElement('name-mapping-unix-user-get-iter')
+        attributes = {
+            'query': {
+                'unix-user-info': {
+                    'user-name': self.parameters['name'],
+                    'vserver': self.parameters['vserver'],
+                }
+            }
+        }
+        get_unix_user.translate_struct(attributes)
+        try:
+            result = self.server.invoke_successfully(get_unix_user, enable_tunneling=True)
+            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+                user_info = result['attributes-list']['unix-user-info']
+                # NOTE(review): assumes 'full-name' is always present in the reply - confirm
+                return {'group_id': int(user_info['group-id']),
+                        'id': int(user_info['user-id']),
+                        'full_name': user_info['full-name']}
+            return None
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error getting UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def create_unix_user(self):
+        """
+        Create a UNIX user in the specified Vserver.
+
+        Requires 'group_id' and 'id'; 'full_name' is optional.
+
+        :return: None
+        """
+        if self.parameters.get('group_id') is None or self.parameters.get('id') is None:
+            self.module.fail_json(msg='Error: Missing one or more required parameters for create: (group_id, id)')
+
+        user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+            'name-mapping-unix-user-create', **{'user-name': self.parameters['name'],
+                                                'group-id': str(self.parameters['group_id']),
+                                                'user-id': str(self.parameters['id'])})
+        if self.parameters.get('full_name') is not None:
+            user_create.add_new_child('full-name', self.parameters['full_name'])
+
+        try:
+            self.server.invoke_successfully(user_create, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_unix_user(self):
+        """
+        Delete a UNIX user from the Vserver.
+
+        :return: None
+        """
+        user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'name-mapping-unix-user-destroy', **{'user-name': self.parameters['name']})
+
+        try:
+            self.server.invoke_successfully(user_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error removing UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_unix_user(self, params):
+        """
+        Modify a UNIX user's group_id, id and/or full_name.
+
+        :param params: dict of attributes to modify
+        :return: None
+        """
+        user_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+            'name-mapping-unix-user-modify', **{'user-name': self.parameters['name']})
+        for key in params:
+            # ZAPI wants string values; ints are converted explicitly
+            if key == 'group_id':
+                user_modify.add_new_child('group-id', str(params['group_id']))
+            if key == 'id':
+                user_modify.add_new_child('user-id', str(params['id']))
+            if key == 'full_name':
+                user_modify.add_new_child('full-name', params['full_name'])
+
+        try:
+            self.server.invoke_successfully(user_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def autosupport_log(self):
+        """
+        Send an EMS (autosupport) log event recording that this module ran.
+
+        :return: None
+        """
+        netapp_utils.ems_log_event("na_ontap_unix_user", self.server)
+
+    def apply(self):
+        """
+        Invoke appropriate action based on playbook parameters.
+
+        Honors check mode: no changes are applied when check_mode is set.
+
+        :return: None
+        """
+        self.autosupport_log()
+        current = self.get_unix_user()
+        # cd_action is 'create', 'delete' or None (user exists and should exist)
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.parameters['state'] == 'present' and cd_action is None:
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.create_unix_user()
+                elif cd_action == 'delete':
+                    self.delete_unix_user()
+                else:
+                    self.modify_unix_user(modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapUnixUser()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py
new file mode 100644
index 00000000..26690f0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py
@@ -0,0 +1,712 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_user
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_user
+
+short_description: NetApp ONTAP user configuration and management
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy users.
+
+options:
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+ type: str
+ applications:
+ description:
+ - List of application to grant access to.
+ - Creating a login with application console, telnet, rsh, and service-processor for a data Vserver is not supported.
+ - Module supports both service-processor and service_processor choices.
+ - ZAPI requires service-processor, while REST requires service_processor, except for an issue with ONTAP 9.6 and 9.7.
+ - snmp is not supported in REST.
+ required: true
+ type: list
+ elements: str
+ choices: ['console', 'http','ontapi','rsh','snmp','service_processor','service-processor','sp','ssh','telnet']
+ aliases:
+ - application
+ authentication_method:
+ description:
+ - Authentication method for the application.
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+ - Password for console application
+ - Password, domain, nsswitch, cert for http application.
+ - Password, domain, nsswitch, cert for ontapi application.
+ - Community for snmp application (when creating SNMPv1 and SNMPv2 users).
+ - The usm and community for snmp application (when creating SNMPv3 users).
+ - Password for sp application.
+ - Password for rsh application.
+ - Password for telnet application.
+ - Password, publickey, domain, nsswitch for ssh application.
+ required: true
+ type: str
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert']
+ set_password:
+ description:
+ - Password for the user account.
+ - It is ignored for creating snmp users, but is required for creating non-snmp users.
+ - For an existing user, this value will be used as the new password.
+ type: str
+ role_name:
+ description:
+ - The name of the role. Required when C(state=present)
+ type: str
+ lock_user:
+ description:
+ - Whether the specified user account is locked.
+ type: bool
+ vserver:
+ description:
+ - The name of the vserver to use.
+ aliases:
+ - svm
+ required: true
+ type: str
+ authentication_protocol:
+ description:
+ - Authentication protocol for the snmp user.
+ - When cluster FIPS mode is on, 'sha' and 'sha2-256' are the only possible and valid values.
+ - When cluster FIPS mode is off, the default value is 'none'.
+ - When cluster FIPS mode is on, the default value is 'sha'.
+ - Only available for 'usm' authentication method and non modifiable.
+ choices: ['none', 'md5', 'sha', 'sha2-256']
+ type: str
+ version_added: '20.6.0'
+ authentication_password:
+ description:
+ - Password for the authentication protocol. This should be minimum 8 characters long.
+ - This is required for 'md5', 'sha' and 'sha2-256' authentication protocols and not required for 'none'.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ engine_id:
+ description:
+ - Authoritative entity's EngineID for the SNMPv3 user.
+ - This should be specified as a hexadecimal string.
+ - Engine ID with first bit set to 1 in first octet should have a minimum of 5 or maximum of 32 octets.
+ - Engine Id with first bit set to 0 in the first octet should be 12 octets in length.
+ - Engine Id cannot have all zeros in its address.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ privacy_protocol:
+ description:
+ - Privacy protocol for the snmp user.
+ - When cluster FIPS mode is on, 'aes128' is the only possible and valid value.
+ - When cluster FIPS mode is off, the default value is 'none'. When cluster FIPS mode is on, the default value is 'aes128'.
+ - Only available for 'usm' authentication method and non modifiable.
+ choices: ['none', 'des', 'aes128']
+ type: str
+ version_added: '20.6.0'
+ privacy_password:
+ description:
+ - Password for the privacy protocol. This should be minimum 8 characters long.
+ - This is required for 'des' and 'aes128' privacy protocols and not required for 'none'.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ remote_switch_ipaddress:
+ description:
+ - This optionally specifies the IP Address of the remote switch.
+ - The remote switch could be a cluster switch monitored by Cluster Switch Health Monitor (CSHM)
+ or a Fiber Channel (FC) switch monitored by Metro Cluster Health Monitor (MCC-HM).
+ - This is applicable only for a remote SNMPv3 user i.e. only if user is a remote (non-local) user,
+ application is snmp and authentication method is usm.
+ type: str
+ version_added: '20.6.0'
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ na_ontap_user:
+ state: present
+ name: SampleUser
+ applications: ssh,console
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ lock_user: True
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete User
+ na_ontap_user:
+ state: absent
+ name: SampleUser
+ applications: ssh
+ authentication_method: password
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create user with snmp application (ZAPI)
+ na_ontap_user:
+ state: present
+ name: test_cert_snmp
+ applications: snmp
+ authentication_method: usm
+ role_name: admin
+ authentication_protocol: md5
+ authentication_password: '12345678'
+ privacy_protocol: 'aes128'
+ privacy_password: '12345678'
+ engine_id: '7063514941000000000000'
+ remote_switch_ipaddress: 10.0.0.0
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+    def __init__(self):
+        """Build the argument spec, pick REST vs ZAPI, and validate support."""
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+
+            applications=dict(required=True, type='list', elements='str', aliases=['application'],
+                              choices=['console', 'http', 'ontapi', 'rsh', 'snmp',
+                                       'sp', 'service-processor', 'service_processor', 'ssh', 'telnet'],),
+            authentication_method=dict(required=True, type='str',
+                                       choices=['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert']),
+            set_password=dict(required=False, type='str', no_log=True),
+            role_name=dict(required=False, type='str'),
+            lock_user=dict(required=False, type='bool'),
+            vserver=dict(required=True, type='str', aliases=['svm']),
+            authentication_protocol=dict(required=False, type='str', choices=['none', 'md5', 'sha', 'sha2-256']),
+            authentication_password=dict(required=False, type='str', no_log=True),
+            engine_id=dict(required=False, type='str'),
+            privacy_protocol=dict(required=False, type='str', choices=['none', 'des', 'aes128']),
+            privacy_password=dict(required=False, type='str', no_log=True),
+            remote_switch_ipaddress=dict(required=False, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_if=[
+                ('state', 'present', ['role_name'])
+            ],
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # REST API should be used for ONTAP 9.6 or higher
+        self.rest_api = OntapRestAPI(self.module)
+        # some attributes are not supported in the earlier REST implementation;
+        # requesting any of them forces the ZAPI path
+        unsupported_rest_properties = ['authentication_password', 'authentication_protocol', 'engine_id',
+                                       'privacy_password', 'privacy_protocol']
+        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+        self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+        if error is not None:
+            self.module.fail_json(msg=error)
+        if not self.use_rest:
+            if not HAS_NETAPP_LIB:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+        else:
+            if 'snmp' in self.parameters['applications']:
+                self.module.fail_json(msg="Snmp as application is not supported in REST.")
+
+    def get_user_rest(self):
+        """
+        Look up the user account via REST.
+
+        :return: (owner_uuid, name) if exactly one account matches,
+                 None if no account matches.
+                 Fails the module on API error or on multiple matches.
+        """
+        api = 'security/accounts'
+        params = {
+            'name': self.parameters['name']
+        }
+        if self.parameters.get('vserver') is None:
+            # vserver is empty for cluster-scoped accounts
+            params['scope'] = 'cluster'
+        else:
+            params['owner.name'] = self.parameters['vserver']
+
+        message, error = self.rest_api.get(api, params)
+        if error:
+            self.module.fail_json(msg='Error while fetching user info: %s' % error)
+        if message['num_records'] == 1:
+            return message['records'][0]['owner']['uuid'], message['records'][0]['name']
+        if message['num_records'] > 1:
+            self.module.fail_json(msg='Error while fetching user info, found multiple entries: %s' % repr(message))
+
+        return None
+
+    def get_user_details_rest(self, name, uuid):
+        """
+        Fetch role, applications and lock state for an account via REST.
+
+        :param name: account name
+        :param uuid: owner (SVM) UUID of the account
+        :return: dict with 'role_name', 'applications' and, when reported,
+                 'lock_user'; implicitly None when the GET returns nothing.
+        """
+        params = {
+            'fields': 'role,applications,locked'
+        }
+        api = "security/accounts/%s/%s" % (uuid, name)
+        message, error = self.rest_api.get(api, params)
+        if error:
+            self.module.fail_json(msg='Error while fetching user details: %s' % error)
+        if message:
+            return_value = {
+                'role_name': message['role']['name'],
+                'applications': [app['application'] for app in message['applications']]
+            }
+            # 'locked' is not returned for every account, hence the guard
+            if "locked" in message:
+                return_value['lock_user'] = message['locked']
+            return return_value
+
+    def get_user(self, application=None):
+        """
+        Check if the user exists (ZAPI).
+
+        :param application: optionally restrict the lookup to one application
+        :return:
+            dict with 'lock_user' and 'role_name' if the user is found
+            None if the user (or the application entry) is not found
+        """
+        security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
+        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+            'security-login-account-info', **{'vserver': self.parameters['vserver'],
+                                              'user-name': self.parameters['name'],
+                                              'authentication-method': self.parameters['authentication_method']})
+        if application is not None:
+            query_details.add_new_child('application', application)
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(query_details)
+        security_login_get_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(security_login_get_iter,
+                                                     enable_tunneling=False)
+            if result.get_child_by_name('num-records') and \
+                    int(result.get_child_content('num-records')) >= 1:
+                interface_attributes = result.get_child_by_name('attributes-list').\
+                    get_child_by_name('security-login-account-info')
+                # NOTE: 'is-locked' comes back as ZAPI string content, not a python bool
+                return_value = {
+                    'lock_user': interface_attributes.get_child_content('is-locked'),
+                    'role_name': interface_attributes.get_child_content('role-name')
+                }
+                return return_value
+            return None
+        except netapp_utils.zapi.NaApiError as error:
+            # Error 16034 denotes a user not being found.
+            if to_native(error.code) == "16034":
+                return None
+            # Error 16043 denotes the user existing, but the application missing
+            elif to_native(error.code) == "16043":
+                return None
+            else:
+                self.module.fail_json(msg='Error getting user %s: %s' % (self.parameters['name'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def create_user_rest(self, apps=None):
+        """
+        Create the user account via REST.
+
+        On an 'invalid value' error mentioning the service-processor
+        application, retries once with the alternate spelling
+        ('service-processor' <-> 'service_processor'), since ONTAP 9.6/9.7
+        accept different values.
+
+        :param apps: list of application names to grant
+        :return: True when the retry path succeeded, implicitly None otherwise
+        """
+        app_list = list()
+        if apps is not None:
+            for app in apps:
+                mydict = {
+                    "application": app,
+                    "authentication_methods": self.parameters['authentication_method'].split(),
+                }
+                app_list.append(mydict)
+        api = 'security/accounts'
+        params = {
+            'name': self.parameters['name'],
+            'role.name': self.parameters['role_name'],
+            'applications': app_list
+        }
+        if self.parameters.get('vserver') is not None:
+            # vserver is empty for cluster-scoped accounts
+            params['owner.name'] = self.parameters['vserver']
+        if 'set_password' in self.parameters:
+            params['password'] = self.parameters['set_password']
+        if 'lock_user' in self.parameters:
+            params['locked'] = self.parameters['lock_user']
+        dummy, error = self.rest_api.post(api, params)
+        error_sp = None
+        if error:
+            if 'invalid value' in error['message']:
+                if 'service-processor' in error['message'] or 'service_processor' in error['message']:
+                    # find if there is error for service processor application value
+                    # update value as per ONTAP version support
+                    app_list_sp = params['applications']
+                    for app_item in app_list_sp:
+                        if 'service-processor' == app_item['application']:
+                            app_item['application'] = 'service_processor'
+                        elif 'service_processor' == app_item['application']:
+                            app_item['application'] = 'service-processor'
+                    params['applications'] = app_list_sp
+                    # post again and throw first error in case of an error
+                    dummy, error_sp = self.rest_api.post(api, params)
+                    if error_sp:
+                        self.module.fail_json(msg='Error while creating user: %s' % error)
+                    return True
+
+        # non-sp errors thrown
+        if error:
+            self.module.fail_json(msg='Error while creating user: %s' % error)
+
+    def create_user(self, application):
+        """
+        Create the login for the given application and authentication_method
+        via ZAPI.
+
+        :param application: application to grant access to
+        """
+        user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+            'security-login-create', **{'vserver': self.parameters['vserver'],
+                                        'user-name': self.parameters['name'],
+                                        'application': application,
+                                        'authentication-method': self.parameters['authentication_method'],
+                                        'role-name': self.parameters.get('role_name')})
+        if self.parameters.get('set_password') is not None:
+            user_create.add_new_child('password', self.parameters.get('set_password'))
+        if self.parameters.get('authentication_method') == 'usm':
+            # SNMPv3 user: attach the usm security settings
+            if self.parameters.get('remote_switch_ipaddress') is not None:
+                user_create.add_new_child('remote-switch-ipaddress', self.parameters.get('remote_switch_ipaddress'))
+            snmpv3_login_info = netapp_utils.zapi.NaElement('snmpv3-login-info')
+            if self.parameters.get('authentication_password') is not None:
+                snmpv3_login_info.add_new_child('authentication-password', self.parameters['authentication_password'])
+            if self.parameters.get('authentication_protocol') is not None:
+                snmpv3_login_info.add_new_child('authentication-protocol', self.parameters['authentication_protocol'])
+            if self.parameters.get('engine_id') is not None:
+                snmpv3_login_info.add_new_child('engine-id', self.parameters['engine_id'])
+            if self.parameters.get('privacy_password') is not None:
+                snmpv3_login_info.add_new_child('privacy-password', self.parameters['privacy_password'])
+            if self.parameters.get('privacy_protocol') is not None:
+                snmpv3_login_info.add_new_child('privacy-protocol', self.parameters['privacy_protocol'])
+            user_create.add_child_elem(snmpv3_login_info)
+
+        try:
+            self.server.invoke_successfully(user_create,
+                                            enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+ def lock_unlock_user_rest(self, useruuid, username, value=None):
+ data = {
+ 'locked': value
+ }
+ params = {
+ 'name': self.parameters['name'],
+ 'owner.uuid': useruuid,
+ }
+ api = "security/accounts/%s/%s" % (useruuid, username)
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error:
+ self.module.fail_json(msg='Error while locking/unlocking user: %s' % error)
+
+    def lock_given_user(self):
+        """
+        Lock the user via ZAPI.
+
+        Fails the module on API error; otherwise returns None.
+        NOTE(review): unlike unlock_given_user, this does NOT return a bool.
+
+        :return: None
+        """
+        user_lock = netapp_utils.zapi.NaElement.create_node_with_children(
+            'security-login-lock', **{'vserver': self.parameters['vserver'],
+                                      'user-name': self.parameters['name']})
+
+        try:
+            self.server.invoke_successfully(user_lock,
+                                            enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error locking user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def unlock_given_user(self):
+        """
+        Unlock the user via ZAPI.
+
+        :return:
+            True if the user was unlocked
+            False when ONTAP reports error 13114 (treated as nothing to do)
+        :rtype: bool
+        """
+        user_unlock = netapp_utils.zapi.NaElement.create_node_with_children(
+            'security-login-unlock', **{'vserver': self.parameters['vserver'],
+                                        'user-name': self.parameters['name']})
+
+        try:
+            self.server.invoke_successfully(user_unlock,
+                                            enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            # 13114: idempotency - nothing to unlock
+            if to_native(error.code) == '13114':
+                return False
+            else:
+                self.module.fail_json(msg='Error unlocking user %s: %s' % (self.parameters['name'], to_native(error)),
+                                      exception=traceback.format_exc())
+        return True
+
+    def delete_user_rest(self):
+        """
+        Delete the user account via REST.
+
+        NOTE(review): assumes the account exists - get_user_rest() returning
+        None would make the tuple unpack raise; confirm callers check first.
+        """
+        uuid, username = self.get_user_rest()
+        api = "security/accounts/%s/%s" % (uuid, username)
+        dummy, error = self.rest_api.delete(api)
+        if error:
+            self.module.fail_json(msg='Error while deleting user : %s' % error)
+
+ def delete_user(self, application):
+ """
+ deletes the user for the given application and authentication_method
+ :param: application: application to grant access to
+ """
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-delete', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name'],
+ 'application': application,
+ 'authentication-method': self.parameters['authentication_method']})
+
+ try:
+ self.server.invoke_successfully(user_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def is_repeated_password(message):
+ return message.startswith('New password must be different than last 6 passwords.') \
+ or message.startswith('New password must be different from last 6 passwords.') \
+ or message.startswith('New password must be different than the old password.') \
+ or message.startswith('New password must be different from the old password.')
+
+    def change_password_rest(self, useruuid, username):
+        """
+        Change the user's password via REST.
+
+        :param useruuid: owner (SVM) UUID of the account
+        :param username: account name
+        :return: True if the password was updated, False when ONTAP reports
+                 the password was reused (treated as idempotent)
+        :rtype: bool
+        """
+        data = {
+            'password': self.parameters['set_password'],
+        }
+        params = {
+            'name': self.parameters['name'],
+            'owner.uuid': useruuid,
+        }
+        api = "security/accounts/%s/%s" % (useruuid, username)
+        dummy, error = self.rest_api.patch(api, data, params)
+        if error:
+            if 'message' in error and self.is_repeated_password(error['message']):
+                # if the password is reused, assume idempotency
+                return False
+            else:
+                self.module.fail_json(msg='Error while updating user password: %s' % error)
+        return True
+
+    def change_password(self):
+        """
+        Changes the password
+
+        :return:
+        True if password updated
+        False if password is not updated
+        :rtype: bool
+        """
+        # self.server.set_vserver(self.parameters['vserver'])
+        modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
+            'security-login-modify-password', **{
+                'new-password': str(self.parameters.get('set_password')),
+                'user-name': self.parameters['name']})
+        try:
+            self.server.invoke_successfully(modify_password,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            # NOTE(review): error 13114 is treated as "nothing changed" -- presumably the
+            # new password is not accepted as a change; confirm against ZAPI error docs.
+            if to_native(error.code) == '13114':
+                return False
+            # if the user give the same password, instead of returning an error, return ok
+            if to_native(error.code) == '13214' and self.is_repeated_password(error.message):
+                return False
+            self.module.fail_json(msg='Error setting password for user %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+        # reset the connection's vserver tunnel after a successful password change
+        self.server.set_vserver(None)
+        return True
+
+ def modify_apps_rest(self, useruuid, username, apps=None):
+ app_list = list()
+ if apps is not None:
+ for app in apps:
+ mydict = {
+ "application": app,
+ "authentication_methods": self.parameters['authentication_method'].split(),
+ }
+ app_list.append(mydict)
+ data = {
+ 'role.name': self.parameters['role_name'],
+ 'applications': app_list
+ }
+ params = {
+ 'name': self.parameters['name'],
+ 'owner.uuid': useruuid,
+ }
+ api = "security/accounts/%s/%s" % (useruuid, username)
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error:
+ self.module.fail_json(msg='Error while modifying user details: %s' % error)
+
+ def modify_user(self, application):
+ """
+ Modify user
+ """
+ user_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name'],
+ 'application': application,
+ 'authentication-method': self.parameters['authentication_method'],
+ 'role-name': self.parameters.get('role_name')})
+
+ try:
+ self.server.invoke_successfully(user_modify,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def change_sp_application(self, current_app):
+ if 'service-processor' or 'service_processor' in self.parameters['applications']:
+ if 'service-processor' in current_app:
+ if 'service_processor' in self.parameters['applications']:
+ index = self.parameters['applications'].index('service_processor')
+ self.parameters['applications'][index] = 'service-processor'
+ if 'service_processor' in current_app:
+ if 'service-processor' in self.parameters['applications']:
+ index = self.parameters['applications'].index('service-processor')
+ self.parameters['applications'][index] = 'service_processor'
+
+    def apply_for_rest(self):
+        """Apply the desired user state (create/delete/modify, password, lock) using REST."""
+        current = self.get_user_rest()
+        if current is not None:
+            # uuid/name are only bound when the user exists; the modify branches
+            # below are only reachable in that case (cd_action is None).
+            uuid, name = current
+            current = self.get_user_details_rest(name, uuid)
+            # align 'service-processor' vs 'service_processor' spelling with ONTAP's
+            self.change_sp_application(current['applications'])
+
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        modify_decision = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if current and 'lock_user' not in current:
+            # REST does not return locked if password is not set
+            if cd_action is None and self.parameters.get('lock_user') is not None:
+                if self.parameters.get('set_password') is None:
+                    self.module.fail_json(msg='Error: cannot modify lock state if password is not set.')
+                modify_decision['lock_user'] = self.parameters['lock_user']
+                self.na_helper.changed = True
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_user_rest(self.parameters['applications'])
+            elif cd_action == 'delete':
+                self.delete_user_rest()
+            elif modify_decision:
+                if 'role_name' in modify_decision or 'applications' in modify_decision:
+                    self.modify_apps_rest(uuid, name, self.parameters['applications'])
+            if cd_action is None and self.parameters.get('set_password') is not None:
+                # if check_mode, don't attempt to change the password, but assume it would be changed
+                # NOTE(review): check_mode is always False inside this outer branch, so the
+                # check_mode test here is redundant -- kept as written.
+                if self.module.check_mode or self.change_password_rest(uuid, name):
+                    self.na_helper.changed = True
+            if cd_action is None and self.na_helper.changed and not self.module.check_mode:
+                # lock/unlock actions require password to be set
+                if modify_decision and 'lock_user' in modify_decision:
+                    self.lock_unlock_user_rest(uuid, name, self.parameters['lock_user'])
+
+        self.module.exit_json(changed=self.na_helper.changed)
+
+    def apply(self):
+        """Entry point: use REST when available, otherwise apply per-application state via ZAPI."""
+        if self.use_rest:
+            self.apply_for_rest()
+        else:
+            create_delete_decision = {}
+            modify_decision = {}
+            netapp_utils.ems_log_event("na_ontap_user", self.server)
+            # evaluate desired vs current state separately for each application
+            for application in self.parameters['applications']:
+                current = self.get_user(application)
+
+                if current is not None:
+                    current['lock_user'] = self.na_helper.get_value_for_bool(True, current['lock_user'])
+
+                cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+                if cd_action is not None:
+                    create_delete_decision[application] = cd_action
+                else:
+                    modify_decision[application] = self.na_helper.get_modified_attributes(current, self.parameters)
+
+            # when set_password is requested, tentatively mark changed;
+            # change_password() below may reset it if nothing actually changed
+            if not create_delete_decision and self.parameters.get('state') == 'present':
+                if self.parameters.get('set_password') is not None:
+                    self.na_helper.changed = True
+
+            if self.na_helper.changed:
+
+                if self.module.check_mode:
+                    pass
+                else:
+                    for application in create_delete_decision:
+                        if create_delete_decision[application] == 'create':
+                            self.create_user(application)
+                        elif create_delete_decision[application] == 'delete':
+                            self.delete_user(application)
+                    lock_user = False
+                    for application in modify_decision:
+                        if 'role_name' in modify_decision[application]:
+                            self.modify_user(application)
+                        if 'lock_user' in modify_decision[application]:
+                            lock_user = True
+                    if not create_delete_decision and self.parameters.get('set_password') is not None:
+                        # if change password return false nothing has changed so we need to set changed to False
+                        self.na_helper.changed = self.change_password()
+                    # NOTE: unlock has to be performed after setting a password
+                    if lock_user:
+                        if self.parameters.get('lock_user'):
+                            self.lock_given_user()
+                        else:
+                            self.unlock_given_user()
+            self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapUser()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py
new file mode 100644
index 00000000..206342ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_user_role
+
+short_description: NetApp ONTAP user role configuration and management
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy user roles
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+ type: str
+
+ command_directory_name:
+ description:
+ - The command or command directory to which the role has an access.
+ required: true
+ type: str
+
+ access_level:
+ description:
+    - The access level granted to the role for the specified command or command directory.
+ choices: ['none', 'readonly', 'all']
+ type: str
+ default: all
+
+ query:
+ description:
+ - A query for the role. The query must apply to the specified command or directory name.
+    - Use double quotes "" for modifying an existing query to none.
+ type: str
+ version_added: 2.8.0
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ type: str
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role
+ na_ontap_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: volume
+ access_level: none
+ query: show
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify User Role
+ na_ontap_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: volume
+ access_level: none
+ query: ""
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUserRole(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ command_directory_name=dict(required=True, type='str'),
+ access_level=dict(required=False, type='str', default='all',
+ choices=['none', 'readonly', 'all']),
+ vserver=dict(required=True, type='str'),
+ query=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_role(self):
+ """
+ Checks if the role exists for specific command-directory-name.
+
+ :return:
+ True if role found
+ False if role is not found
+ :rtype: bool
+ """
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name']}
+
+ security_login_role_get_iter = netapp_utils.zapi.NaElement(
+ 'security-login-role-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-info', **options)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_role_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(
+ security_login_role_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16031 denotes a role not being found.
+ if to_native(e.code) == "16031":
+ return None
+ # Error 16039 denotes command directory not found.
+ elif to_native(e.code) == "16039":
+ return None
+ else:
+ self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ role_info = result.get_child_by_name('attributes-list').get_child_by_name('security-login-role-info')
+ result = {
+ 'name': role_info['role-name'],
+ 'access_level': role_info['access-level'],
+ 'command_directory_name': role_info['command-directory-name'],
+ 'query': role_info['role-query']
+ }
+ return result
+
+ return None
+
+ def create_role(self):
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name'],
+ 'access-level': self.parameters['access_level']}
+ if self.parameters.get('query'):
+ options['role-query'] = self.parameters['query']
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-create', **options)
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name':
+ self.parameters['command_directory_name']})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_role(self, modify):
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name']}
+ if 'access_level' in modify.keys():
+ options['access-level'] = self.parameters['access_level']
+ if 'query' in modify.keys():
+ options['role-query'] = self.parameters['query']
+
+ role_modify = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-modify', **options)
+
+ try:
+ self.server.invoke_successfully(role_modify,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ self.asup_log_for_cserver('na_ontap_user_role')
+ current = self.get_role()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ # if desired state specify empty quote query and current query is None, set desired query to None.
+ # otherwise na_helper.get_modified_attributes will detect a change.
+ if self.parameters.get('query') == '' and current is not None:
+ if current['query'] is None:
+ self.parameters['query'] = None
+
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_role()
+ elif cd_action == 'delete':
+ self.delete_role()
+ elif modify:
+ self.modify_role(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ netapp_utils.ems_log_event(event_name, self.server)
+
+
+def main():
+ obj = NetAppOntapUserRole()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py
new file mode 100644
index 00000000..94df84bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py
@@ -0,0 +1,2100 @@
+#!/usr/bin/python
+
+# (c) 2018-2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_volume
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_volume
+
+short_description: NetApp ONTAP manage volumes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy or modify volumes on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the existing volume to be renamed to name.
+ type: str
+ version_added: 2.7.0
+
+ is_infinite:
+ type: bool
+ description:
+ Set True if the volume is an Infinite Volume.
+ Deleting an infinite volume is asynchronous.
+
+ is_online:
+ type: bool
+ description:
+ - Whether the specified volume is online, or not.
+ default: True
+
+ aggregate_name:
+ description:
+ - The name of the aggregate the flexvol should exist on.
+ - Cannot be set when using the na_application_template option.
+ type: str
+
+ nas_application_template:
+ description:
+ - additional options when using the application/applications REST API to create a volume.
+ - the module is using ZAPI by default, and switches to REST if any suboption is present.
+ - create a FlexVol by default.
+ - create a FlexGroup if C(auto_provision_as) is set and C(FlexCache) option is not present.
+ - create a FlexCache if C(flexcache) option is present.
+ type: dict
+ version_added: 20.12.0
+ suboptions:
+ flexcache:
+ description: whether to create a flexcache. If absent, a FlexVol or FlexGroup is created.
+ type: dict
+ suboptions:
+ origin_svm_name:
+ description: the remote SVM for the flexcache.
+ type: str
+ required: true
+ origin_component_name:
+ description: the remote component for the flexcache.
+ type: str
+ required: true
+ cifs_access:
+ description:
+ - The list of CIFS access controls. You must provide I(user_or_group) or I(access) to enable CIFS access.
+ type: list
+ elements: dict
+ suboptions:
+ access:
+ description: The CIFS access granted to the user or group. Default is full_control.
+ type: str
+ choices: [change, full_control, no_access, read]
+ user_or_group:
+ description: The name of the CIFS user or group that will be granted access. Default is Everyone.
+ type: str
+ nfs_access:
+ description:
+ - The list of NFS access controls. You must provide I(host) or I(access) to enable NFS access.
+ - Mutually exclusive with export_policy option in nas_application_template.
+ type: list
+ elements: dict
+ suboptions:
+ access:
+ description: The NFS access granted. Default is rw.
+ type: str
+ choices: [none, ro, rw]
+ host:
+ description: The name of the NFS entity granted access. Default is 0.0.0.0/0.
+ type: str
+ storage_service:
+ description:
+ - The performance service level (PSL) for this volume
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy (see C(tiering_policy) for a more complete description).
+ type: dict
+ suboptions:
+ control:
+ description: Storage tiering placement rules for the container.
+ choices: ['required', 'best_effort', 'disallowed']
+ type: str
+ policy:
+ description:
+ - Cloud tiering policy (see C(tiering_policy)).
+ - Must match C(tiering_policy) if both are present.
+ choices: ['snapshot-only', 'auto', 'backup', 'none']
+ type: str
+ object_stores:
+ description: list of object store names for tiering.
+ type: list
+ elements: str
+ use_nas_application:
+ description:
+ - Whether to use the application/applications REST/API to create a volume.
+ - This will default to true if any other suboption is present.
+ type: bool
+ default: true
+
+ size:
+ description:
+ - The size of the volume in (size_unit). Required when C(state=present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'gb'
+
+ size_change_threshold:
+ description:
+ - Percentage in size change to trigger a resize.
+ - When this parameter is greater than 0, a difference in size between what is expected and what is configured is ignored if it is below the threshold.
+ - For instance, the nas application allocates a larger size than specified to account for overhead.
+ - Set this to 0 for an exact match.
+ type: int
+ default: 10
+ version_added: 20.12.0
+
+ sizing_method:
+ description:
+ - Represents the method to modify the size of a FlexGroup.
+ - use_existing_resources - Increases or decreases the size of the FlexGroup by increasing or decreasing the size of the current FlexGroup resources.
+ - add_new_resources - Increases the size of the FlexGroup by adding new resources. This is limited to two new resources per available aggregate.
+ - This is only supported if REST is enabled (ONTAP 9.6 or later) and only for FlexGroups. ONTAP defaults to use_existing_resources.
+ type: str
+ choices: ['add_new_resources', 'use_existing_resources']
+ version_added: 20.12.0
+
+ type:
+ description:
+ - The volume type, either read-write (RW) or data-protection (DP).
+ type: str
+
+ export_policy:
+ description:
+ - Name of the export policy.
+ - Mutually exclusive with nfs_access suboption in nas_application_template.
+ type: str
+ aliases: ['policy']
+
+ junction_path:
+ description:
+ - Junction path of the volume.
+ - To unmount, use junction path C('').
+ type: str
+
+ space_guarantee:
+ description:
+ - Space guarantee style for the volume.
+ choices: ['none', 'file', 'volume']
+ type: str
+
+ percent_snapshot_space:
+ description:
+ - Amount of space reserved for snapshot copies of the volume.
+ type: int
+
+ volume_security_style:
+ description:
+ - The security style associated with this volume.
+ choices: ['mixed', 'ntfs', 'unified', 'unix']
+ type: str
+
+ encrypt:
+ type: bool
+ description:
+ - Whether or not to enable Volume Encryption.
+ default: False
+ version_added: 2.7.0
+
+ efficiency_policy:
+ description:
+ - Allows a storage efficiency policy to be set on volume creation.
+ type: str
+ version_added: 2.7.0
+
+ unix_permissions:
+ description:
+ - Unix permission bits in octal or symbolic format.
+    - For example, 0 is equivalent to ------------, 777 is equivalent to ---rwxrwxrwx, both formats are accepted.
+ - The valid octal value ranges between 0 and 777 inclusive.
+ type: str
+ version_added: 2.8.0
+
+ group_id:
+ description:
+ - The UNIX group ID for the volume. The default value is 0 ('root').
+ type: int
+ version_added: '20.1.0'
+
+ user_id:
+ description:
+ - The UNIX user ID for the volume. The default value is 0 ('root').
+ type: int
+ version_added: '20.1.0'
+
+ snapshot_policy:
+ description:
+ - The name of the snapshot policy.
+ - The default policy name is 'default'.
+ - If present, this will set the protection_type when using C(nas_application_template).
+ type: str
+ version_added: 2.8.0
+
+ aggr_list:
+ description:
+ - an array of names of aggregates to be used for FlexGroup constituents.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ aggr_list_multiplier:
+ description:
+ - The number of times to iterate over the aggregates listed with the aggr_list parameter when creating a FlexGroup.
+ type: int
+ version_added: 2.8.0
+
+ auto_provision_as:
+ description:
+ - Automatically provision a FlexGroup volume.
+ version_added: 2.8.0
+ choices: ['flexgroup']
+ type: str
+
+ snapdir_access:
+ description:
+ - This is an advanced option, the default is False.
+ - Enable the visible '.snapshot' directory that is normally present at system internal mount points.
+ - This value also turns on access to all other '.snapshot' directories in the volume.
+ type: bool
+ version_added: 2.8.0
+
+ atime_update:
+ description:
+ - This is an advanced option, the default is True.
+ - If false, prevent the update of inode access times when a file is read.
+ - This value is useful for volumes with extremely high read traffic,
+ since it prevents writes to the inode file for the volume from contending with reads from other files.
+ - This field should be used carefully.
+ - That is, use this field when you know in advance that the correct access time for inodes will not be needed for files on that volume.
+ type: bool
+ version_added: 2.8.0
+
+ wait_for_completion:
+ description:
+ - Set this parameter to 'true' for synchronous execution during create (wait until volume status is online)
+ - Set this parameter to 'false' for asynchronous execution
+ - For asynchronous, execution exits as soon as the request is sent, without checking volume status
+ type: bool
+ default: false
+ version_added: 2.8.0
+
+ time_out:
+ description:
+ - time to wait for flexGroup creation, modification, or deletion in seconds.
+ - Error out if task is not completed in defined time.
+ - if 0, the request is asynchronous.
+ - default is set to 3 minutes.
+ default: 180
+ type: int
+ version_added: 2.8.0
+
+ language:
+ description:
+ - Language to use for Volume
+ - Default uses SVM language
+ - Possible values Language
+ - c POSIX
+ - ar Arabic
+ - cs Czech
+ - da Danish
+ - de German
+ - en English
+ - en_us English (US)
+ - es Spanish
+ - fi Finnish
+ - fr French
+ - he Hebrew
+ - hr Croatian
+ - hu Hungarian
+ - it Italian
+ - ja Japanese euc-j
+ - ja_v1 Japanese euc-j
+ - ja_jp.pck Japanese PCK (sjis)
+ - ja_jp.932 Japanese cp932
+ - ja_jp.pck_v2 Japanese PCK (sjis)
+ - ko Korean
+ - no Norwegian
+ - nl Dutch
+ - pl Polish
+ - pt Portuguese
+ - ro Romanian
+ - ru Russian
+ - sk Slovak
+ - sl Slovenian
+ - sv Swedish
+ - tr Turkish
+ - zh Simplified Chinese
+ - zh.gbk Simplified Chinese (GBK)
+ - zh_tw Traditional Chinese euc-tw
+ - zh_tw.big5 Traditional Chinese Big 5
+ - To use UTF-8 as the NFS character set, append '.UTF-8' to the language code
+ type: str
+ version_added: 2.8.0
+
+ qos_policy_group:
+ description:
+ - Specifies a QoS policy group to be set on volume.
+ type: str
+ version_added: 2.9.0
+
+ qos_adaptive_policy_group:
+ description:
+ - Specifies a QoS adaptive policy group to be set on volume.
+ type: str
+ version_added: 2.9.0
+
+ tiering_policy:
+ description:
+ - The tiering policy that is to be associated with the volume.
+ - This policy decides whether the blocks of a volume will be tiered to the capacity tier.
+ - snapshot-only policy allows tiering of only the volume snapshot copies not associated with the active file system.
+ - auto policy allows tiering of both snapshot and active file system user data to the capacity tier.
+ - backup policy on DP volumes allows all transferred user data blocks to start in the capacity tier.
+ - When set to none, the Volume blocks will not be tiered to the capacity tier.
+ - If no value specified, the volume is assigned snapshot only by default.
+ - Requires ONTAP 9.4 or later.
+ choices: ['snapshot-only', 'auto', 'backup', 'none']
+ type: str
+ version_added: 2.9.0
+
+ space_slo:
+ description:
+ - Specifies the space SLO type for the volume. The space SLO type is the Service Level Objective for space management for the volume.
+ - The space SLO value is used to enforce existing volume settings so that sufficient space is set aside on the aggregate to meet the space SLO.
+ - This parameter is not supported on Infinite Volumes.
+ choices: ['none', 'thick', 'semi-thick']
+ type: str
+ version_added: 2.9.0
+
+ nvfail_enabled:
+ description:
+ - If true, the controller performs additional work at boot and takeover times if it finds that there has been any potential data loss in the volume's
+ constituents due to an NVRAM failure.
+ - The volume's constituents would be put in a special state called 'in-nvfailed-state' such that protocol access is blocked.
+ - This will cause the client applications to crash and thus prevent access to stale data.
+ - To get out of this situation, the admin needs to manually clear the 'in-nvfailed-state' on the volume's constituents.
+ type: bool
+ version_added: 2.9.0
+
+ vserver_dr_protection:
+ description:
+ - Specifies the protection type for the volume in a Vserver DR setup.
+ choices: ['protected', 'unprotected']
+ type: str
+ version_added: 2.9.0
+
+ comment:
+ description:
+ - Sets a comment associated with the volume.
+ type: str
+ version_added: 2.9.0
+
+ snapshot_auto_delete:
+ description:
+ - A dictionary for the auto delete options and values.
+ - Supported options include 'state', 'commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
+ 'prefix', 'destroy_list'.
+ - Option 'state' determines if the snapshot autodelete is currently enabled for the volume. Possible values are 'on' and 'off'.
+ - Option 'commitment' determines the snapshots which snapshot autodelete is allowed to delete to get back space.
+ Possible values are 'try', 'disrupt' and 'destroy'.
+ - Option 'trigger' determines the condition which starts the automatic deletion of snapshots.
+ Possible values are 'volume', 'snap_reserve' and DEPRECATED 'space_reserve'.
+ - Option 'target_free_space' determines when snapshot autodelete should stop deleting snapshots. Depending on the trigger,
+ snapshots are deleted till we reach the target free space percentage. Accepts int type.
+ - Option 'delete_order' determines if the oldest or newest snapshot is deleted first. Possible values are 'newest_first' and 'oldest_first'.
+ - Option 'defer_delete' determines which kind of snapshots to delete in the end. Possible values are 'scheduled', 'user_created',
+ 'prefix' and 'none'.
+ - Option 'prefix' can be set to provide the prefix string for the 'prefix' value of the 'defer_delete' option.
+ The prefix string length can be 15 char long.
+      - Option 'destroy_list' is a comma separated list of services which can be destroyed if the snapshot backing that service is deleted.
+ For 7-mode, the possible values for this option are a combination of 'lun_clone', 'vol_clone', 'cifs_share', 'file_clone' or 'none'.
+ For cluster-mode, the possible values for this option are a combination of 'lun_clone,file_clone' (for LUN clone and/or file clone),
+ 'lun_clone,sfsr' (for LUN clone and/or sfsr), 'vol_clone', 'cifs_share', or 'none'.
+ type: dict
+ version_added: '20.4.0'
+
+ cutover_action:
+ description:
+ - Specifies the action to be taken for cutover.
+ - Possible values are 'abort_on_failure', 'defer_on_failure', 'force' and 'wait'. Default is 'defer_on_failure'.
+ choices: ['abort_on_failure', 'defer_on_failure', 'force', 'wait']
+ type: str
+ version_added: '20.5.0'
+
+ check_interval:
+ description:
+ - The amount of time in seconds to wait between checks of a volume to see if it has moved successfully.
+ default: 30
+ type: int
+ version_added: '20.6.0'
+
+ from_vserver:
+ description:
+ - The source vserver of the volume is rehosted.
+ type: str
+ version_added: '20.6.0'
+
+ auto_remap_luns:
+ description:
+ - Flag to control automatic map of LUNs.
+ type: bool
+ version_added: '20.6.0'
+
+ force_unmap_luns:
+ description:
+ - Flag to control automatic unmap of LUNs.
+ type: bool
+ version_added: '20.6.0'
+
+ force_restore:
+ description:
+ - If this field is set to "true", the Snapshot copy is restored even if the volume has one or more newer Snapshot
+ copies which are currently used as reference Snapshot copy by SnapMirror. If a restore is done in this
+ situation, this will cause future SnapMirror transfers to fail.
+ - Option should only be used along with snapshot_restore.
+ type: bool
+ version_added: '20.6.0'
+
+ preserve_lun_ids:
+ description:
+ - If this field is set to "true", LUNs in the volume being restored will remain mapped and their identities
+ preserved such that host connectivity will not be disrupted during the restore operation. I/O's to the LUN will
+ be fenced during the restore operation by placing the LUNs in an unavailable state. Once the restore operation
+ has completed, hosts will be able to resume I/O access to the LUNs.
+ - Option should only be used along with snapshot_restore.
+ type: bool
+ version_added: '20.6.0'
+
+ snapshot_restore:
+ description:
+ - Name of snapshot to restore from.
+ - Not supported on Infinite Volume.
+ type: str
+ version_added: '20.6.0'
+
+ compression:
+ description:
+ - Whether to enable compression for the volume (HDD and Flash Pool aggregates).
+ - If this option is not present, it is automatically set to true if inline_compression is true.
+ type: bool
+ version_added: '20.12.0'
+
+ inline_compression:
+ description:
+ - Whether to enable inline compression for the volume (HDD and Flash Pool aggregates, AFF platforms).
+ type: bool
+ version_added: '20.12.0'
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume12
+ is_infinite: False
+ aggregate_name: ansible_aggr
+ size: 100
+ size_unit: mb
+ user_id: 1001
+ group_id: 2002
+ space_guarantee: none
+ tiering_policy: auto
+ export_policy: default
+ percent_snapshot_space: 60
+ qos_policy_group: max_performance_gold
+ vserver: ansibleVServer
+ wait_for_completion: True
+ space_slo: none
+ nvfail_enabled: False
+ comment: ansible created volume
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Volume Delete
+ na_ontap_volume:
+ state: absent
+ name: ansibleVolume12
+ aggregate_name: ansible_aggr
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Make FlexVol offline
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ is_online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create flexGroup volume manually
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ aggr_list: "{{ aggr_list }}"
+ aggr_list_multiplier: 2
+ size: 200
+ size_unit: mb
+ space_guarantee: none
+ export_policy: default
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ unix_permissions: 777
+ snapshot_policy: default
+ time_out: 0
+
+ - name: Create flexGroup volume auto provision as flex group
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ auto_provision_as: flexgroup
+ size: 200
+ size_unit: mb
+ space_guarantee: none
+ export_policy: default
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ unix_permissions: 777
+ snapshot_policy: default
+ time_out: 0
+
+ - name: Create FlexVol with QoS adaptive
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume15
+ is_infinite: False
+ aggregate_name: ansible_aggr
+ size: 100
+ size_unit: gb
+ space_guarantee: none
+ export_policy: default
+ percent_snapshot_space: 10
+ qos_adaptive_policy_group: extreme
+ vserver: ansibleVServer
+ wait_for_completion: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify volume dr protection (vserver of the volume must be in a snapmirror relationship)
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ vserver_dr_protection: protected
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+
+ - name: Modify volume with snapshot auto delete options
+ na_ontap_volume:
+ state: present
+ name: vol_auto_delete
+ snapshot_auto_delete:
+ state: "on"
+ commitment: try
+ defer_delete: scheduled
+ target_free_space: 30
+ destroy_list: lun_clone,vol_clone
+ delete_order: newest_first
+ aggregate_name: "{{ aggr }}"
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+
+ - name: Move volume with force cutover action
+ na_ontap_volume:
+ name: ansible_vol
+ aggregate_name: aggr_ansible
+ cutover_action: force
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Rehost volume to another vserver auto remap luns
+ na_ontap_volume:
+ name: ansible_vol
+ from_vserver: ansible
+ auto_remap_luns: true
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Rehost volume to another vserver force unmap luns
+ na_ontap_volume:
+ name: ansible_vol
+ from_vserver: ansible
+ force_unmap_luns: true
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Snapshot restore volume
+ na_ontap_volume:
+ name: ansible_vol
+ vserver: ansible
+ snapshot_restore: 2020-05-24-weekly
+ force_restore: true
+ preserve_lun_ids: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+
+ - name: Volume create using application/applications nas template
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume12
+ vserver: ansibleSVM
+ size: 100000000
+ size_unit: b
+ space_guarantee: none
+ language: es
+ percent_snapshot_space: 60
+ unix_permissions: ---rwxrwxrwx
+ snapshot_policy: default
+ efficiency_policy: default
+ comment: testing
+ nas_application_template:
+ nfs_access: # the mere presence of a suboption is enough to enable this new feature
+ - access: ro
+ - access: rw
+ host: 10.0.0.0/8
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVolume(object):
+ '''Class with volume operations'''
+
def __init__(self):
    '''Initialize module parameters.

    Builds the argument spec, validates snapshot_auto_delete suboptions,
    normalizes size to bytes, and opens the ZAPI connections (one tunneled
    into the vserver, one cluster-scoped) plus, when a nas application
    template is requested, a REST connection.
    '''
    # Multipliers to convert the user-facing (size, size_unit) pair to bytes.
    self._size_unit_map = dict(
        bytes=1,
        b=1,
        kb=1024,
        mb=1024 ** 2,
        gb=1024 ** 3,
        tb=1024 ** 4,
        pb=1024 ** 5,
        eb=1024 ** 6,
        zb=1024 ** 7,
        yb=1024 ** 8
    )

    self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
    self.argument_spec.update(dict(
        state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
        name=dict(required=True, type='str'),
        vserver=dict(required=True, type='str'),
        from_name=dict(required=False, type='str'),
        is_infinite=dict(required=False, type='bool', default=False),
        is_online=dict(required=False, type='bool', default=True),
        size=dict(type='int', default=None),
        size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
        sizing_method=dict(choices=['add_new_resources', 'use_existing_resources'], type='str'),
        aggregate_name=dict(type='str', default=None),
        type=dict(type='str', default=None),
        export_policy=dict(type='str', default=None, aliases=['policy']),
        junction_path=dict(type='str', default=None),
        space_guarantee=dict(choices=['none', 'file', 'volume'], default=None),
        percent_snapshot_space=dict(type='int', default=None),
        volume_security_style=dict(choices=['mixed', 'ntfs', 'unified', 'unix']),
        encrypt=dict(required=False, type='bool', default=False),
        efficiency_policy=dict(required=False, type='str'),
        unix_permissions=dict(required=False, type='str'),
        group_id=dict(required=False, type='int'),
        user_id=dict(required=False, type='int'),
        snapshot_policy=dict(required=False, type='str'),
        aggr_list=dict(required=False, type='list', elements='str'),
        aggr_list_multiplier=dict(required=False, type='int'),
        snapdir_access=dict(required=False, type='bool'),
        atime_update=dict(required=False, type='bool'),
        auto_provision_as=dict(choices=['flexgroup'], required=False, type='str'),
        wait_for_completion=dict(required=False, type='bool', default=False),
        time_out=dict(required=False, type='int', default=180),
        language=dict(type='str', required=False),
        qos_policy_group=dict(required=False, type='str'),
        qos_adaptive_policy_group=dict(required=False, type='str'),
        nvfail_enabled=dict(type='bool', required=False),
        space_slo=dict(type='str', required=False, choices=['none', 'thick', 'semi-thick']),
        tiering_policy=dict(type='str', required=False, choices=['snapshot-only', 'auto', 'backup', 'none']),
        vserver_dr_protection=dict(type='str', required=False, choices=['protected', 'unprotected']),
        comment=dict(type='str', required=False),
        snapshot_auto_delete=dict(type='dict', required=False),
        cutover_action=dict(required=False, type='str', choices=['abort_on_failure', 'defer_on_failure', 'force', 'wait']),
        check_interval=dict(required=False, type='int', default=30),
        from_vserver=dict(required=False, type='str'),
        auto_remap_luns=dict(required=False, type='bool'),
        force_unmap_luns=dict(required=False, type='bool'),
        force_restore=dict(required=False, type='bool'),
        compression=dict(required=False, type='bool'),
        inline_compression=dict(required=False, type='bool'),
        preserve_lun_ids=dict(required=False, type='bool'),
        snapshot_restore=dict(required=False, type='str'),
        nas_application_template=dict(type='dict', options=dict(
            use_nas_application=dict(type='bool', default=True),
            flexcache=dict(type='dict', options=dict(
                origin_svm_name=dict(required=True, type='str'),
                origin_component_name=dict(required=True, type='str')
            )),
            cifs_access=dict(type='list', elements='dict', options=dict(
                access=dict(type='str', choices=['change', 'full_control', 'no_access', 'read']),
                user_or_group=dict(type='str')
            )),
            nfs_access=dict(type='list', elements='dict', options=dict(
                access=dict(type='str', choices=['none', 'ro', 'rw']),
                host=dict(type='str')
            )),
            storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
            tiering=dict(type='dict', options=dict(
                control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
                policy=dict(type='str', choices=['snapshot-only', 'auto', 'backup', 'none']),
                object_stores=dict(type='list', elements='str')  # create only
            ))
        )),
        size_change_threshold=dict(type='int', default=10),
    ))

    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        mutually_exclusive=[
            ['space_guarantee', 'space_slo'], ['auto_remap_luns', 'force_unmap_luns']
        ],
        supports_check_mode=True
    )
    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.check_and_set_parameters(self.module)
    self.volume_style = None
    self.warnings = list()
    # Module option name -> ZAPI element name, for reading (get) and
    # writing (set) volume efficiency (sis) attributes.
    self.sis_keys2zapi_get = dict(
        efficiency_policy='policy',
        compression='is-compression-enabled',
        inline_compression='is-inline-compression-enabled')
    self.sis_keys2zapi_set = dict(
        efficiency_policy='policy-name',
        compression='enable-compression',
        inline_compression='enable-inline-compression')

    # Normalize size to bytes once, so all later comparisons are unit-free.
    if self.parameters.get('size'):
        self.parameters['size'] = self.parameters['size'] * \
            self._size_unit_map[self.parameters['size_unit']]
    # snapshot_auto_delete is a free-form dict; reject unknown suboptions early.
    if 'snapshot_auto_delete' in self.parameters:
        for key in self.parameters['snapshot_auto_delete']:
            if key not in ['commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
                           'prefix', 'destroy_list', 'state']:
                self.module.fail_json(msg="snapshot_auto_delete option '%s' is not valid." % key)

    if HAS_NETAPP_LIB is False:
        self.module.fail_json(
            msg="the python NetApp-Lib module is required")
    else:
        # self.server tunnels ZAPI calls into the vserver; self.cluster is
        # cluster-scoped (needed e.g. for volume-move-start).
        self.server = netapp_utils.setup_na_ontap_zapi(
            module=self.module, vserver=self.parameters['vserver'])
        self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)

    # REST API for application/applications if needed
    self.rest_api, self.rest_app = self.setup_rest_application()
+
def setup_rest_application(self):
    '''Set up the REST application/applications helper when requested.

    Returns a (rest_api, rest_app) tuple; both are None unless the
    nas_application_template.use_nas_application suboption is truthy.
    Fails the module on option combinations that conflict with the
    application template.
    '''
    use_application_template = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'use_nas_application'])
    rest_api, rest_app = None, None
    if use_application_template:
        # consistency checks
        # tiering policy is duplicated, make sure values are matching
        tiering_policy_nas = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'policy'])
        tiering_policy = self.na_helper.safe_get(self.parameters, ['tiering_policy'])
        if tiering_policy_nas is not None and tiering_policy is not None and tiering_policy_nas != tiering_policy:
            msg = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.'
            msg += ' Found "%s" and "%s".' % (tiering_policy, tiering_policy_nas)
            self.module.fail_json(msg=msg)
        # aggregate_name will force a move if present
        if self.parameters.get('aggregate_name') is not None:
            msg = 'Conflict: aggregate_name is not supported when application template is enabled.'\
                  ' Found: aggregate_name: %s' % self.parameters['aggregate_name']
            self.module.fail_json(msg=msg)
        nfs_access = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'nfs_access'])
        if nfs_access is not None and self.na_helper.safe_get(self.parameters, ['export_policy']) is not None:
            msg = 'Conflict: export_policy option and nfs_access suboption in nas_application_template are mutually exclusive.'
            self.module.fail_json(msg=msg)
        rest_api = netapp_utils.OntapRestAPI(self.module)
        rest_app = RestApplication(rest_api, self.parameters['vserver'], self.parameters['name'])
    return rest_api, rest_app
+
def volume_get_iter(self, vol_name=None):
    """
    Return volume-get-iter query results
    :param vol_name: name of the volume
    :return: NaElement
    """
    # Restrict the iterator query to this volume name and vserver so at
    # most the one matching volume is returned.
    volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
    volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
    volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
    volume_id_attributes.add_new_child('name', vol_name)
    volume_id_attributes.add_new_child('vserver', self.parameters['vserver'])
    volume_attributes.add_child_elem(volume_id_attributes)
    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(volume_attributes)
    volume_info.add_child_elem(query)

    try:
        result = self.server.invoke_successfully(volume_info, True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error fetching volume %s : %s'
                                  % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
    return result
+
def get_volume(self, vol_name=None):
    """
    Return details about the volume
    :param:
        name : Name of the volume
    :return: Details about the volume. None if not found.
    :rtype: dict

    Bug fix: when 'is-autodelete-enabled' is absent from the ZAPI reply,
    record it under the 'state' key (the same key used when it is present,
    and the suboption name validated in __init__) instead of the stray
    'is_autodelete_enabled' key, so the desired-vs-current comparison for
    snapshot_auto_delete.state works.
    """
    if vol_name is None:
        vol_name = self.parameters['name']
    volume_get_iter = self.volume_get_iter(vol_name)
    return_value = None
    if volume_get_iter.get_child_by_name('num-records') and \
            int(volume_get_iter.get_child_content('num-records')) > 0:

        volume_attributes = volume_get_iter['attributes-list']['volume-attributes']
        volume_space_attributes = volume_attributes['volume-space-attributes']
        volume_state_attributes = volume_attributes['volume-state-attributes']
        volume_id_attributes = volume_attributes['volume-id-attributes']
        try:
            volume_export_attributes = volume_attributes['volume-export-attributes']
        except KeyError:    # does not exist for MDV volumes
            volume_export_attributes = None
        volume_security_unix_attributes = self.na_helper.safe_get(volume_attributes,
                                                                  ['volume-security-attributes', 'volume-security-unix-attributes'],
                                                                  allow_sparse_dict=False)
        volume_snapshot_attributes = volume_attributes['volume-snapshot-attributes']
        volume_performance_attributes = volume_attributes['volume-performance-attributes']
        volume_snapshot_auto_delete_attributes = volume_attributes['volume-snapshot-autodelete-attributes']
        try:
            volume_comp_aggr_attributes = volume_attributes['volume-comp-aggr-attributes']
        except KeyError:    # Not supported in 9.1 to 9.3
            volume_comp_aggr_attributes = None
        # Get volume's state (online/offline)
        current_state = volume_state_attributes['state']
        is_online = (current_state == "online")

        return_value = {
            'name': vol_name,
            'size': int(volume_space_attributes['size']),
            'is_online': is_online,
            'unix_permissions': volume_security_unix_attributes['permissions']
        }
        # Optional attributes: only report what the controller returned,
        # falling back to None (or '' for junction_path) when absent.
        if volume_snapshot_attributes.get_child_by_name('snapshot-policy'):
            return_value['snapshot_policy'] = volume_snapshot_attributes['snapshot-policy']
        if volume_export_attributes is not None:
            return_value['export_policy'] = volume_export_attributes['policy']
        else:
            return_value['export_policy'] = None
        if volume_security_unix_attributes.get_child_by_name('group-id'):
            return_value['group_id'] = int(volume_security_unix_attributes['group-id'])
        if volume_security_unix_attributes.get_child_by_name('user-id'):
            return_value['user_id'] = int(volume_security_unix_attributes['user-id'])
        if volume_comp_aggr_attributes is not None:
            return_value['tiering_policy'] = volume_comp_aggr_attributes['tiering-policy']
        if volume_space_attributes.get_child_by_name('encrypt'):
            return_value['encrypt'] = self.na_helper.get_value_for_bool(True, volume_space_attributes['encrypt'], 'encrypt')
        if volume_space_attributes.get_child_by_name('percentage-snapshot-reserve'):
            return_value['percent_snapshot_space'] = int(volume_space_attributes['percentage-snapshot-reserve'])
        if volume_id_attributes.get_child_by_name('type'):
            return_value['type'] = volume_id_attributes['type']
        if volume_space_attributes.get_child_by_name('space-slo'):
            return_value['space_slo'] = volume_space_attributes['space-slo']
        else:
            return_value['space_slo'] = None
        if volume_state_attributes.get_child_by_name('is-nvfail-enabled'):
            return_value['nvfail_enabled'] = self.na_helper.get_value_for_bool(True, volume_state_attributes['is-nvfail-enabled'], 'is-nvfail-enabled')
        else:
            return_value['nvfail_enabled'] = None
        if volume_id_attributes.get_child_by_name('containing-aggregate-name'):
            return_value['aggregate_name'] = volume_id_attributes['containing-aggregate-name']
        else:
            return_value['aggregate_name'] = None
        if volume_id_attributes.get_child_by_name('junction-path'):
            return_value['junction_path'] = volume_id_attributes['junction-path']
        else:
            return_value['junction_path'] = ''
        if volume_id_attributes.get_child_by_name('comment'):
            return_value['comment'] = volume_id_attributes['comment']
        else:
            return_value['comment'] = None
        return_value['uuid'] = self.na_helper.safe_get(volume_id_attributes, ['instance-uuid'])
        if volume_attributes['volume-security-attributes'].get_child_by_name('style'):
            # style is not present if the volume is still offline or of type: dp
            return_value['volume_security_style'] = volume_attributes['volume-security-attributes']['style']
        if volume_id_attributes.get_child_by_name('style-extended'):
            return_value['style_extended'] = volume_id_attributes['style-extended']
        else:
            return_value['style_extended'] = None
        if volume_space_attributes.get_child_by_name('space-guarantee'):
            return_value['space_guarantee'] = volume_space_attributes['space-guarantee']
        else:
            return_value['space_guarantee'] = None
        if volume_snapshot_attributes.get_child_by_name('snapdir-access-enabled'):
            return_value['snapdir_access'] = self.na_helper.get_value_for_bool(True,
                                                                               volume_snapshot_attributes['snapdir-access-enabled'],
                                                                               'snapdir-access-enabled')
        else:
            return_value['snapdir_access'] = None
        if volume_performance_attributes.get_child_by_name('is-atime-update-enabled'):
            return_value['atime_update'] = self.na_helper.get_value_for_bool(True,
                                                                             volume_performance_attributes['is-atime-update-enabled'],
                                                                             'is-atime-update-enabled')
        else:
            return_value['atime_update'] = None
        if volume_attributes.get_child_by_name('volume-qos-attributes'):
            volume_qos_attributes = volume_attributes['volume-qos-attributes']
            if volume_qos_attributes.get_child_by_name('policy-group-name'):
                return_value['qos_policy_group'] = volume_qos_attributes['policy-group-name']
            else:
                return_value['qos_policy_group'] = None
            if volume_qos_attributes.get_child_by_name('adaptive-policy-group-name'):
                return_value['qos_adaptive_policy_group'] = volume_qos_attributes['adaptive-policy-group-name']
            else:
                return_value['qos_adaptive_policy_group'] = None
        else:
            return_value['qos_policy_group'] = None
            return_value['qos_adaptive_policy_group'] = None
        if volume_attributes.get_child_by_name('volume-vserver-dr-protection-attributes'):
            volume_vserver_dr_protection_attributes = volume_attributes['volume-vserver-dr-protection-attributes']
            if volume_vserver_dr_protection_attributes.get_child_by_name('vserver-dr-protection'):
                return_value['vserver_dr_protection'] = volume_vserver_dr_protection_attributes['vserver-dr-protection']
            else:
                return_value['vserver_dr_protection'] = None
        # snapshot_auto_delete options
        auto_delete = dict()
        if volume_snapshot_auto_delete_attributes.get_child_by_name('commitment'):
            auto_delete['commitment'] = volume_snapshot_auto_delete_attributes['commitment']
        else:
            auto_delete['commitment'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('defer-delete'):
            auto_delete['defer_delete'] = volume_snapshot_auto_delete_attributes['defer-delete']
        else:
            auto_delete['defer_delete'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('delete-order'):
            auto_delete['delete_order'] = volume_snapshot_auto_delete_attributes['delete-order']
        else:
            auto_delete['delete_order'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('destroy-list'):
            auto_delete['destroy_list'] = volume_snapshot_auto_delete_attributes['destroy-list']
        else:
            auto_delete['destroy_list'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('is-autodelete-enabled'):
            # map the boolean ZAPI value onto the user-facing 'on'/'off' states
            if self.na_helper.get_value_for_bool(True, volume_snapshot_auto_delete_attributes['is-autodelete-enabled'], 'is-autodelete-enabled'):
                auto_delete['state'] = 'on'
            else:
                auto_delete['state'] = 'off'
        else:
            # FIX: was auto_delete['is_autodelete_enabled'] = None, which left
            # the 'state' key undefined and used a key name that is not a
            # valid snapshot_auto_delete suboption.
            auto_delete['state'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('prefix'):
            auto_delete['prefix'] = volume_snapshot_auto_delete_attributes['prefix']
        else:
            auto_delete['prefix'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('target-free-space'):
            auto_delete['target_free_space'] = int(volume_snapshot_auto_delete_attributes['target-free-space'])
        else:
            auto_delete['target_free_space'] = None
        if volume_snapshot_auto_delete_attributes.get_child_by_name('trigger'):
            auto_delete['trigger'] = volume_snapshot_auto_delete_attributes['trigger']
        else:
            auto_delete['trigger'] = None
        return_value['snapshot_auto_delete'] = auto_delete
        self.get_efficiency_info(return_value)

    return return_value
+
def fail_on_error(self, error, api=None, stack=False):
    '''Fail the module when error is set; no-op when error is None.

    :param error: error string (or None) as returned by REST helpers.
    :param api: optional API name to prefix to the message.
    :param stack: when True, attach the current call stack to the result.
    '''
    if error is None:
        return
    if api is not None:
        error = 'calling api: %s: %s' % (api, error)
    results = dict(msg="Error: %s" % error)
    if stack:
        results['stack'] = traceback.format_stack()
    self.module.fail_json(**results)
+
def create_nas_application_component(self):
    '''Create application component for nas template.

    Builds the application_components dict for the REST
    application/applications nas template from module options.
    Fails the module when 'name' or 'size' is missing.
    '''
    required_options = ('name', 'size')
    for option in required_options:
        if self.parameters.get(option) is None:
            self.module.fail_json(msg='Error: "%s" is required to create nas application.' % option)

    application_component = dict(
        name=self.parameters['name'],
        total_size=self.parameters['size'],
        share_count=1,      # 1 is the maximum value for nas
        scale_out=(self.volume_style == 'flexGroup'),
    )
    name = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'storage_service'])
    if name is not None:
        application_component['storage_service'] = dict(name=name)

    flexcache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache'])
    if flexcache is not None:
        application_component['flexcache'] = dict(
            origin=dict(
                svm=dict(name=flexcache['origin_svm_name']),
                component=dict(name=flexcache['origin_component_name'])
            )
        )

    # tiering: merge the template suboptions with the top-level
    # tiering_policy option (template value wins when both are set;
    # conflicts were already rejected in setup_rest_application).
    tiering = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering'])
    if tiering is not None or self.parameters.get('tiering_policy') is not None:
        application_component['tiering'] = dict()
        if tiering is None:
            tiering = dict()
        if 'policy' not in tiering:
            tiering['policy'] = self.parameters.get('tiering_policy')
        for attr in ('control', 'policy', 'object_stores'):
            value = tiering.get(attr)
            if attr == 'object_stores' and value is not None:
                value = [dict(name=x) for x in value]
            if value is not None:
                application_component['tiering'][attr] = value
    # NOTE(review): 'qos_policy' is not declared in the argument_spec above
    # (only qos_policy_group / qos_adaptive_policy_group are), so this branch
    # appears unreachable as written — confirm intended option name.
    if self.parameters.get('qos_policy') is not None:
        application_component['qos'] = {
            "policy": {
                "name": self.parameters['qos_policy'],
            }
        }
    if self.parameters.get('export_policy') is not None:
        application_component['export_policy'] = {
            "name": self.parameters['export_policy'],
        }
    return application_component
+
def create_volume_body(self):
    '''Create body for nas template.

    Returns the (body, error) tuple produced by
    RestApplication.create_application_body.
    '''
    nas = dict(application_components=[self.create_nas_application_component()])
    value = self.na_helper.safe_get(self.parameters, ['snapshot_policy'])
    if value is not None:
        nas['protection_type'] = dict(local_policy=value)
    for attr in ('nfs_access', 'cifs_access'):
        value = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr])
        if value is not None:
            # we expect value to be a list of dicts, with maybe some empty entries
            value = self.na_helper.filter_out_none_entries(value)
            if value:
                nas[attr] = value
    return self.rest_app.create_application_body("nas", nas)
+
def create_nas_application(self):
    '''Use REST application/applications nas template to create a volume.

    Fails the module on any REST error; returns the REST response on success.
    '''
    body, error = self.create_volume_body()
    self.fail_on_error(error)
    response, error = self.rest_app.create_application(body)
    self.fail_on_error(error)
    return response
+
def create_volume(self):
    '''Create ONTAP volume.

    Dispatches to the REST nas application when enabled, to the async
    ZAPI for flexGroups, otherwise uses the synchronous volume-create
    ZAPI; optionally polls until the volume reports online.
    '''
    if self.rest_app:
        return self.create_nas_application()
    if self.volume_style == 'flexGroup':
        return self.create_volume_async()

    options = self.create_volume_options()
    volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create', **options)
    try:
        self.server.invoke_successfully(volume_create, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as error:
        size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
        self.module.fail_json(msg='Error provisioning volume %s%s: %s'
                              % (self.parameters['name'], size_msg, to_native(error)),
                              exception=traceback.format_exc())
    self.ems_log_event("volume-create")

    if self.parameters.get('wait_for_completion'):
        # round off time_out
        # poll every 10 seconds until online or the time_out budget is spent
        retries = (self.parameters['time_out'] + 5) // 10
        is_online = None
        errors = list()
        while not is_online and retries > 0:
            try:
                current = self.get_volume()
                is_online = None if current is None else current['is_online']
            except KeyError as err:
                # get_volume may receive incomplete data as the volume is being created
                errors.append(repr(err))
            if not is_online:
                time.sleep(10)
            retries = retries - 1
        if not is_online:
            errors.append("Timeout after %s seconds" % self.parameters['time_out'])
            self.module.fail_json(msg='Error waiting for volume %s to come online: %s'
                                  % (self.parameters['name'], str(errors)))
    return None
+
def create_volume_async(self):
    '''
    create volume async.

    Used for flexGroup volumes: issues volume-create-async (with the
    optional aggr-list) and waits on the async job result.
    '''
    options = self.create_volume_options()
    volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create-async', **options)
    if self.parameters.get('aggr_list'):
        aggr_list_obj = netapp_utils.zapi.NaElement('aggr-list')
        volume_create.add_child_elem(aggr_list_obj)
        for aggr in self.parameters['aggr_list']:
            aggr_list_obj.add_new_child('aggr-name', aggr)
    try:
        result = self.server.invoke_successfully(volume_create, enable_tunneling=True)
        self.ems_log_event("volume-create")
    except netapp_utils.zapi.NaApiError as error:
        size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
        self.module.fail_json(msg='Error provisioning volume %s%s: %s'
                              % (self.parameters['name'], size_msg, to_native(error)),
                              exception=traceback.format_exc())
    # surface async job failures / wait for completion
    self.check_invoke_result(result, 'create')
    return None
+
def create_volume_options(self):
    '''Set volume options for create operation.

    Returns a dict of ZAPI child-element names to string values. Note
    that flexGroup (volume-create-async) and flexVol (volume-create)
    use different element names for the volume name and space guarantee.
    '''
    options = {}
    if self.volume_style == 'flexGroup':
        # volume-create-async element names
        options['volume-name'] = self.parameters['name']
        if self.parameters.get('aggr_list_multiplier') is not None:
            options['aggr-list-multiplier'] = str(self.parameters['aggr_list_multiplier'])
        if self.parameters.get('auto_provision_as') is not None:
            options['auto-provision-as'] = self.parameters['auto_provision_as']
        if self.parameters.get('space_guarantee') is not None:
            options['space-guarantee'] = self.parameters['space_guarantee']
    else:
        # volume-create element names; a containing aggregate is mandatory
        options['volume'] = self.parameters['name']
        if self.parameters.get('aggregate_name') is None:
            self.module.fail_json(msg='Error provisioning volume %s: aggregate_name is required'
                                  % self.parameters['name'])
        options['containing-aggr-name'] = self.parameters['aggregate_name']
        if self.parameters.get('space_guarantee') is not None:
            options['space-reserve'] = self.parameters['space_guarantee']

    # options common to both styles; ZAPI expects string values
    if self.parameters.get('size') is not None:
        options['size'] = str(self.parameters['size'])
    if self.parameters.get('snapshot_policy') is not None:
        options['snapshot-policy'] = self.parameters['snapshot_policy']
    if self.parameters.get('unix_permissions') is not None:
        options['unix-permissions'] = self.parameters['unix_permissions']
    if self.parameters.get('group_id') is not None:
        options['group-id'] = str(self.parameters['group_id'])
    if self.parameters.get('user_id') is not None:
        options['user-id'] = str(self.parameters['user_id'])
    if self.parameters.get('volume_security_style') is not None:
        options['volume-security-style'] = self.parameters['volume_security_style']
    if self.parameters.get('export_policy') is not None:
        options['export-policy'] = self.parameters['export_policy']
    if self.parameters.get('junction_path') is not None:
        options['junction-path'] = self.parameters['junction_path']
    if self.parameters.get('comment') is not None:
        options['volume-comment'] = self.parameters['comment']
    if self.parameters.get('type') is not None:
        options['volume-type'] = self.parameters['type']
    if self.parameters.get('percent_snapshot_space') is not None:
        options['percentage-snapshot-reserve'] = str(self.parameters['percent_snapshot_space'])
    if self.parameters.get('language') is not None:
        options['language-code'] = self.parameters['language']
    if self.parameters.get('qos_policy_group') is not None:
        options['qos-policy-group-name'] = self.parameters['qos_policy_group']
    if self.parameters.get('qos_adaptive_policy_group') is not None:
        options['qos-adaptive-policy-group-name'] = self.parameters['qos_adaptive_policy_group']
    if self.parameters.get('nvfail_enabled') is not None:
        options['is-nvfail-enabled'] = str(self.parameters['nvfail_enabled'])
    if self.parameters.get('space_slo') is not None:
        options['space-slo'] = self.parameters['space_slo']
    if self.parameters.get('tiering_policy') is not None:
        options['tiering-policy'] = self.parameters['tiering_policy']
    if self.parameters.get('encrypt') is not None:
        options['encrypt'] = self.na_helper.get_value_for_bool(False, self.parameters['encrypt'], 'encrypt')
    if self.parameters.get('vserver_dr_protection') is not None:
        options['vserver-dr-protection'] = self.parameters['vserver_dr_protection']
    if self.parameters['is_online']:
        options['volume-state'] = 'online'
    else:
        options['volume-state'] = 'offline'
    return options
+
+ def delete_volume(self, current):
+ '''Delete ONTAP volume.
+
+ For infinite volumes and FlexGroups the async ZAPI is used, and the
+ volume must be taken offline first; FlexVols use the synchronous
+ volume-destroy with unmount-and-offline so no separate state change
+ is needed.
+ :param current: dict describing the existing volume (used for is_online).
+ '''
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexGroup':
+ if current['is_online']:
+ # async destroy requires the volume to be offline already
+ self.change_volume_state(call_from_delete_vol=True)
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.parameters['name']})
+ else:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **{'name': self.parameters['name'], 'unmount-and-offline': 'true'})
+ try:
+ result = self.server.invoke_successfully(volume_delete, enable_tunneling=True)
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexGroup':
+ # async call returns a job; wait for it (honors time_out)
+ self.check_invoke_result(result, 'delete')
+ self.ems_log_event("volume-delete")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def move_volume(self):
+ '''Move volume from source aggregate to destination aggregate.
+
+ Issued at cluster scope (volume moves are cluster operations).
+ On ZAPI failure, falls back to the REST CLI passthrough; only if
+ that is unavailable is the original ZAPI error reported.
+ '''
+ volume_move = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-move-start', **{'source-volume': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ 'dest-aggr': self.parameters['aggregate_name']})
+ if self.parameters.get('cutover_action'):
+ volume_move.add_new_child('cutover-action', self.parameters['cutover_action'])
+ try:
+ self.cluster.invoke_successfully(volume_move,
+ enable_tunneling=True)
+ self.ems_log_event("volume-move")
+ except netapp_utils.zapi.NaApiError as error:
+ # e.g. MDV volumes reject the ZAPI move; try REST passthrough first
+ if not self.move_volume_with_rest_passthrough():
+ self.module.fail_json(msg='Error moving volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def move_volume_with_rest_passthrough(self):
+ # MDV volume will fail on a move, but will work using the REST CLI pass through
+ # vol move start -volume MDV_CRS_d6b0b313ff5611e9837100a098544e51_A -destination-aggregate data_a3 -vserver wmc66-a
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ use_rest = rest_api.is_rest()
+ # if REST isn't available fail with the original error
+ if not use_rest:
+ return False
+ # if REST exists let's try moving using the passthrough CLI
+ api = 'private/cli/volume/move/start'
+ data = {'volume:': self.parameters['name'],
+ 'destination-aggregate': self.parameters['aggregate_name'],
+ 'vserver': self.parameters['vserver']}
+ dummy, error = rest_api.patch(api, data)
+ if error is not None:
+ self.module.fail_json(msg='Error moving volume %s: %s' % (self.parameters['name'], error))
+ return True
+
+ def wait_for_volume_move(self):
+ waiting = True
+ fail_count = 0
+ while waiting:
+ volume_move_iter = netapp_utils.zapi.NaElement('volume-move-get-iter')
+ volume_move_info = netapp_utils.zapi.NaElement('volume-move-info')
+ volume_move_info.add_new_child('volume', self.parameters['name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_move_info)
+ volume_move_iter.add_child_elem(query)
+ try:
+ result = self.cluster.invoke_successfully(volume_move_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if fail_count < 3:
+ fail_count += 1
+ time.sleep(self.parameters['check_interval'])
+ continue
+ self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+ # reset fail count to 0
+ fail_count = 0
+ volume_move_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info').\
+ get_child_content('state')
+ # We have 5 states that can be returned.
+ # warning and healthy are state where the move is still going so we don't need to do anything for thouse.
+ if volume_move_status == 'done':
+ waiting = False
+ if volume_move_status in ['failed', 'alert']:
+ self.module.fail_json(msg='Error moving volume %s: %s' %
+ (self.parameters['name'],
+ result.get_child_by_name('attributes-list')[0].get_child_by_name('details')))
+ time.sleep(self.parameters['check_interval'])
+
+ def rename_volume(self):
+ """
+ Rename the volume from self.parameters['from_name'] to ['name'].
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume (uses the async ZAPI, which takes a different
+ element name for the volume). Use time_out parameter to set wait
+ time for rename completion.
+ """
+ vol_rename_zapi, vol_name_zapi = ['volume-rename-async', 'volume-name'] if self.parameters['is_infinite']\
+ else ['volume-rename', 'volume']
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ vol_rename_zapi, **{vol_name_zapi: self.parameters['from_name'],
+ 'new-volume-name': str(self.parameters['name'])})
+ try:
+ result = self.server.invoke_successfully(volume_rename, enable_tunneling=True)
+ if vol_rename_zapi == 'volume-rename-async':
+ # async call returns a job; wait for it
+ self.check_invoke_result(result, 'rename')
+ self.ems_log_event("volume-rename")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rest_resize_volume(self):
+ """
+ Re-size the volume using REST PATCH method.
+
+ Requires self.parameters['uuid'] (populated from the current volume
+ in apply()); sizing_method is passed as a query parameter.
+ :return: the REST response dict.
+ """
+ uuid = self.parameters['uuid']
+ if uuid is None:
+ self.module.fail_json(msg='Could not read UUID for volume %s' % self.parameters['name'])
+ api = '/storage/volumes/%s' % uuid
+ body = dict(size=self.parameters['size'])
+ query = dict(sizing_method=self.parameters['sizing_method'])
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ response, error = rest_api.patch(api, body, query)
+ self.fail_on_error(error, api)
+ return response
+
+ def resize_volume(self):
+ """
+ Re-size the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to resize an
+ Infinite Volume. When sizing_method is set, the resize is done
+ through REST instead (ZAPI does not support it).
+ :return: REST response when using REST, None otherwise.
+ """
+ if self.parameters.get('sizing_method') is not None:
+ return self.rest_resize_volume()
+
+ # FlexGroups and Infinite Volumes use the async ZAPI variant
+ vol_size_zapi, vol_name_zapi = ['volume-size-async', 'volume-name']\
+ if (self.parameters['is_infinite'] or self.volume_style == 'flexGroup')\
+ else ['volume-size', 'volume']
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ vol_size_zapi, **{vol_name_zapi: self.parameters['name'],
+ 'new-size': str(self.parameters['size'])})
+ try:
+ result = self.server.invoke_successfully(volume_resize, enable_tunneling=True)
+ if vol_size_zapi == 'volume-size-async':
+ self.check_invoke_result(result, 'resize')
+ self.ems_log_event("volume-resize")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error re-sizing volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return None
+
+ def change_volume_state(self, call_from_delete_vol=False):
+ """
+ Change volume's state (offline/online).
+
+ :param call_from_delete_vol: when True, force the offline path even
+ if the desired state is online (delete of async volumes
+ requires the volume to be offline first).
+ """
+ if self.parameters['is_online'] and not call_from_delete_vol: # Desired state is online, setup zapi APIs respectively
+ vol_state_zapi, vol_name_zapi, action = ['volume-online-async', 'volume-name', 'online']\
+ if (self.parameters['is_infinite'] or self.volume_style == 'flexGroup')\
+ else ['volume-online', 'name', 'online']
+ else: # Desired state is offline, setup zapi APIs respectively
+ vol_state_zapi, vol_name_zapi, action = ['volume-offline-async', 'volume-name', 'offline']\
+ if (self.parameters['is_infinite'] or self.volume_style == 'flexGroup')\
+ else ['volume-offline', 'name', 'offline']
+ volume_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-unmount', **{'volume-name': self.parameters['name']})
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ vol_state_zapi, **{vol_name_zapi: self.parameters['name']})
+ try:
+ if not self.parameters['is_online'] or call_from_delete_vol: # Unmount before offline
+ self.server.invoke_successfully(volume_unmount, enable_tunneling=True)
+ result = self.server.invoke_successfully(volume_change_state, enable_tunneling=True)
+ if self.volume_style == 'flexGroup' or self.parameters['is_infinite']:
+ # async call returns a job; wait for it
+ self.check_invoke_result(result, action)
+ self.ems_log_event("change-state")
+ except netapp_utils.zapi.NaApiError as error:
+ state = "online" if self.parameters['is_online'] else "offline"
+ self.module.fail_json(msg='Error changing the state of volume %s to %s: %s'
+ % (self.parameters['name'], state, to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_volume_attribute(self, zapi_object, parent_attribute, attribute, value):
+ """
+ Attach a leaf (attribute, value) element under the right container.
+
+ Two calling conventions, selected by the type of parent_attribute:
+ - parent_attribute is a str: a new NaElement of that name is created,
+ the leaf is added to it, and it is attached under zapi_object.
+ - parent_attribute is an NaElement: the leaf is added to zapi_object,
+ and zapi_object is attached under parent_attribute.
+ :param zapi_object: NaElement container (see above).
+ :param parent_attribute: str name of a new container, or parent NaElement.
+ :param attribute: name of the leaf element.
+ :param value: content of the leaf element.
+ :return: None
+ """
+ if isinstance(parent_attribute, str):
+ vol_attribute = netapp_utils.zapi.NaElement(parent_attribute)
+ vol_attribute.add_new_child(attribute, value)
+ zapi_object.add_child_elem(vol_attribute)
+ else:
+ zapi_object.add_new_child(attribute, value)
+ parent_attribute.add_child_elem(zapi_object)
+
+ def volume_modify_attributes(self, params):
+ """
+ modify volume parameter 'export_policy','unix_permissions','snapshot_policy','space_guarantee', 'percent_snapshot_space',
+ 'qos_policy_group', 'qos_adaptive_policy_group'
+
+ Builds a volume-modify-iter(-async) request by assembling only the
+ sub-attribute containers for parameters the user supplied, then
+ checks both the failure-list in the response and (for async) the
+ returned job status.
+ :param params: the modify dict - used to gate volume_security_style
+ and tiering_policy so they are only sent when actually changing.
+ """
+ if self.volume_style == 'flexGroup' or self.parameters['is_infinite']:
+ vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter-async')
+ else:
+ vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter')
+ attributes = netapp_utils.zapi.NaElement('attributes')
+ vol_mod_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ # Volume-attributes is split in to 25 sub categories
+ # volume-space-attributes
+ vol_space_attributes = netapp_utils.zapi.NaElement('volume-space-attributes')
+ if self.parameters.get('space_guarantee') is not None:
+ self.create_volume_attribute(vol_space_attributes, vol_mod_attributes,
+ 'space-guarantee', self.parameters['space_guarantee'])
+ if self.parameters.get('percent_snapshot_space') is not None:
+ self.create_volume_attribute(vol_space_attributes, vol_mod_attributes,
+ 'percentage-snapshot-reserve', str(self.parameters['percent_snapshot_space']))
+ if self.parameters.get('space_slo') is not None:
+ self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-slo', self.parameters['space_slo'])
+ # volume-snapshot-attributes
+ vol_snapshot_attributes = netapp_utils.zapi.NaElement('volume-snapshot-attributes')
+ if self.parameters.get('snapshot_policy') is not None:
+ self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes,
+ 'snapshot-policy', self.parameters['snapshot_policy'])
+ if self.parameters.get('snapdir_access') is not None:
+ self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes,
+ 'snapdir-access-enabled',
+ self.na_helper.get_value_for_bool(False, self.parameters['snapdir_access'], 'snapdir_access'))
+ # volume-export-attributes
+ if self.parameters.get('export_policy') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-export-attributes',
+ 'policy', self.parameters['export_policy'])
+ # volume-security-attributes
+ if self.parameters.get('unix_permissions') is not None or self.parameters.get('group_id') is not None or self.parameters.get('user_id') is not None:
+ vol_security_attributes = netapp_utils.zapi.NaElement('volume-security-attributes')
+ vol_security_unix_attributes = netapp_utils.zapi.NaElement('volume-security-unix-attributes')
+ if self.parameters.get('unix_permissions') is not None:
+ self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
+ 'permissions', self.parameters['unix_permissions'])
+ if self.parameters.get('group_id') is not None:
+ self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
+ 'group-id', str(self.parameters['group_id']))
+ if self.parameters.get('user_id') is not None:
+ self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
+ 'user-id', str(self.parameters['user_id']))
+ vol_mod_attributes.add_child_elem(vol_security_attributes)
+ # security style is gated on the modify dict, not self.parameters,
+ # so it is only sent when it actually changes
+ if params and params.get('volume_security_style') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-security-attributes',
+ 'style', self.parameters['volume_security_style'])
+
+ # volume-performance-attributes
+ if self.parameters.get('atime_update') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-performance-attributes',
+ 'is-atime-update-enabled', self.na_helper.get_value_for_bool(False, self.parameters['atime_update'], 'atime_update'))
+ # volume-qos-attributes
+ if self.parameters.get('qos_policy_group') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-qos-attributes',
+ 'policy-group-name', self.parameters['qos_policy_group'])
+ if self.parameters.get('qos_adaptive_policy_group') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-qos-attributes',
+ 'adaptive-policy-group-name', self.parameters['qos_adaptive_policy_group'])
+ # volume-comp-aggr-attributes
+ if params and params.get('tiering_policy') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-comp-aggr-attributes',
+ 'tiering-policy', self.parameters['tiering_policy'])
+ # volume-state-attributes
+ if self.parameters.get('nvfail_enabled') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-state-attributes', 'is-nvfail-enabled', str(self.parameters['nvfail_enabled']))
+ # volume-dr-protection-attributes
+ if self.parameters.get('vserver_dr_protection') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-vserver-dr-protection-attributes',
+ 'vserver-dr-protection', self.parameters['vserver_dr_protection'])
+ # volume-id-attributes
+ if self.parameters.get('comment') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-id-attributes',
+ 'comment', self.parameters['comment'])
+ # End of Volume-attributes sub attributes
+ attributes.add_child_elem(vol_mod_attributes)
+ # query selects the single volume to modify by name
+ query = netapp_utils.zapi.NaElement('query')
+ vol_query_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ self.create_volume_attribute(vol_query_attributes, 'volume-id-attributes',
+ 'name', self.parameters['name'])
+ query.add_child_elem(vol_query_attributes)
+ vol_mod_iter.add_child_elem(attributes)
+ vol_mod_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(vol_mod_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ error_msg = to_native(error)
+ if 'volume-comp-aggr-attributes' in error_msg:
+ error_msg += ". Added info: tiering option requires 9.4 or later."
+ self.module.fail_json(msg='Error modifying volume %s: %s'
+ % (self.parameters['name'], error_msg),
+ exception=traceback.format_exc())
+
+ self.ems_log_event("volume-modify")
+ # the iter call can succeed overall while individual volumes fail
+ failures = result.get_child_by_name('failure-list')
+ # handle error if modify space, policy, or unix-permissions parameter fails
+ if failures is not None:
+ error_msgs = list()
+ for return_info in ('volume-modify-iter-info', 'volume-modify-iter-async-info'):
+ if failures.get_child_by_name(return_info) is not None:
+ error_msgs.append(failures.get_child_by_name(return_info).get_child_content('error-message'))
+ if error_msgs and any([x is not None for x in error_msgs]):
+ self.module.fail_json(msg="Error modifying volume %s: %s"
+ % (self.parameters['name'], ' --- '.join(error_msgs)),
+ exception=traceback.format_exc())
+ if self.volume_style == 'flexGroup' or self.parameters['is_infinite']:
+ # async variant returns a job id in success-list; wait on it
+ success = result.get_child_by_name('success-list')
+ success = success.get_child_by_name('volume-modify-iter-async-info')
+ results = dict()
+ for key in ('status', 'jobid'):
+ if success and success.get_child_by_name(key):
+ results[key] = success[key]
+ status = results.get('status')
+ if status == 'in_progress' and 'jobid' in results:
+ if self.parameters['time_out'] == 0:
+ # user asked not to wait for job completion
+ return
+ error = self.check_job_status(results['jobid'])
+ if error is None:
+ return
+ self.module.fail_json(msg='Error when modify volume: %s' % error)
+ self.module.fail_json(msg='Unexpected error when modifying volume: result is: %s' % str(result.to_string()))
+
+ def volume_mount(self):
+ """
+ Mount an existing volume at the specified junction_path.
+ :return: None
+ """
+ vol_mount = netapp_utils.zapi.NaElement('volume-mount')
+ vol_mount.add_new_child('volume-name', self.parameters['name'])
+ vol_mount.add_new_child('junction-path', self.parameters['junction_path'])
+ try:
+ self.server.invoke_successfully(vol_mount, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error mounting volume %s on path %s: %s'
+ % (self.parameters['name'], self.parameters['junction_path'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def volume_unmount(self):
+ """
+ Unmount an existing volume (remove its junction path).
+ :return: None
+ """
+ vol_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-unmount', **{'volume-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(vol_unmount, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error unmounting volume %s: %s'
+ % (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+ def modify_volume(self, modify):
+ '''Modify volume action - dispatch each changed attribute to the
+ right helper, in a deliberate order (state first, move last).
+ :param modify: dict of attributes that need to change.
+ '''
+ attributes = modify.keys()
+ # order matters here, if both is_online and mount in modify, must bring the volume online first.
+ if 'is_online' in attributes:
+ self.change_volume_state()
+ # a single volume-modify-iter call covers all of these; break after
+ # the first hit so it is only issued once
+ for attribute in attributes:
+ if attribute in ['space_guarantee', 'export_policy', 'unix_permissions', 'group_id', 'user_id', 'tiering_policy',
+ 'snapshot_policy', 'percent_snapshot_space', 'snapdir_access', 'atime_update', 'volume_security_style',
+ 'nvfail_enabled', 'space_slo', 'qos_policy_group', 'qos_adaptive_policy_group', 'vserver_dr_protection', 'comment']:
+ self.volume_modify_attributes(modify)
+ break
+ if 'snapshot_auto_delete' in attributes:
+ self.set_snapshot_auto_delete()
+ if 'junction_path' in attributes:
+ # empty junction path means unmount the volume
+ if modify.get('junction_path') == '':
+ self.volume_unmount()
+ else:
+ self.volume_mount()
+ if 'size' in attributes:
+ self.resize_volume()
+ if 'aggregate_name' in attributes:
+ # keep it last, as it may take some time
+ self.move_volume()
+ if self.parameters.get('wait_for_completion'):
+ self.wait_for_volume_move()
+
+ def compare_chmod_value(self, current):
+ """
+ compare current unix_permissions to desired unix_permissions.
+
+ The desired value may be numeric ('755') or symbolic in the 12-char
+ ONTAP form '---rwxrwxrwx' (first 3 chars must be '---'); the current
+ value from ONTAP is always numeric.
+ :return: True if the same, False if not the same or desired
+ unix_permissions is not valid.
+ """
+ desire = self.parameters
+ if current is None:
+ return False
+ octal_value = ''
+ unix_permissions = desire['unix_permissions']
+ if unix_permissions.isdigit():
+ return int(current['unix_permissions']) == int(unix_permissions)
+ else:
+ if len(unix_permissions) != 12:
+ return False
+ if unix_permissions[:3] != '---':
+ return False
+ # convert each rwx triplet to its octal digit, then compare
+ for i in range(3, len(unix_permissions), 3):
+ if unix_permissions[i] not in ['r', '-'] or unix_permissions[i + 1] not in ['w', '-']\
+ or unix_permissions[i + 2] not in ['x', '-']:
+ return False
+ group_permission = self.char_to_octal(unix_permissions[i:i + 3])
+ octal_value += str(group_permission)
+ return int(current['unix_permissions']) == int(octal_value)
+
+ def char_to_octal(self, chars):
+ """
+ Convert one 'rwx'-style triplet to its octal digit (r=4, w=2, x=1).
+ :param chars: 3-character permission group, e.g. 'r-x'.
+ :return: octal value (0-7) of the individual group permission.
+ """
+ total = 0
+ if chars[0] == 'r':
+ total += 4
+ if chars[1] == 'w':
+ total += 2
+ if chars[2] == 'x':
+ total += 1
+ return total
+
+ def get_volume_style(self, current):
+ '''Get volume style: 'flexGroup', an ONTAP extended style string,
+ or None (treated as a standard flexvol).
+ :param current: existing volume info dict, or None when the volume
+ does not exist yet (style is then inferred from the
+ FlexGroup-only creation parameters).
+ '''
+ if current is None:
+ if self.parameters.get('aggr_list') or self.parameters.get('aggr_list_multiplier') or self.parameters.get('auto_provision_as'):
+ return 'flexGroup'
+ else:
+ if current.get('style_extended'):
+ # normalize ONTAP's lowercase 'flexgroup' to our spelling
+ if current['style_extended'] == 'flexgroup':
+ return 'flexGroup'
+ else:
+ return current['style_extended']
+ return None
+
+ def get_job(self, jobid, server):
+ """
+ Get job details by id.
+ :param jobid: ONTAP job identifier.
+ :param server: ZAPI connection to query (jobs may live on the
+ cluster vserver rather than the target vserver).
+ :return: dict with 'job-progress', 'job-state', 'job-completion'
+ (None when not reported), or None if the job is not found
+ on this server (ZAPI error 15661).
+ """
+ job_get = netapp_utils.zapi.NaElement('job-get')
+ job_get.add_new_child('job-id', jobid)
+ try:
+ result = server.invoke_successfully(job_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "15661":
+ # Not found
+ return None
+ self.module.fail_json(msg='Error fetching job info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
+ results = {
+ 'job-progress': job_info['job-progress'],
+ 'job-state': job_info['job-state']
+ }
+ if job_info.get_child_by_name('job-completion') is not None:
+ results['job-completion'] = job_info['job-completion']
+ else:
+ results['job-completion'] = None
+ return results
+
+ def check_job_status(self, jobid):
+ """
+ Loop until job is complete, polling every 5 seconds up to
+ self.parameters['time_out'] seconds.
+ :return: None on success, otherwise an error string (job completion
+ message, progress text, timeout, or 'cannot locate job').
+ """
+ server = self.server
+ sleep_time = 5
+ time_out = self.parameters['time_out']
+ results = self.get_job(jobid, server)
+ error = 'timeout'
+
+ while time_out > 0:
+ results = self.get_job(jobid, server)
+ # If running as cluster admin, the job is owned by cluster vserver
+ # rather than the target vserver.
+ if results is None and server == self.server:
+ # NOTE: 'results' is briefly reused here to hold the cserver
+ # name before being overwritten on the next get_job call.
+ results = netapp_utils.get_cserver(self.server)
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ continue
+ if results is None:
+ error = 'cannot locate job with id: %d' % int(jobid)
+ break
+ if results['job-state'] in ('queued', 'running'):
+ time.sleep(sleep_time)
+ time_out -= sleep_time
+ continue
+ if results['job-state'] in ('success', 'failure'):
+ break
+ else:
+ self.module.fail_json(msg='Unexpected job status in: %s' % repr(results))
+
+ if results is not None:
+ if results['job-state'] == 'success':
+ error = None
+ elif results['job-state'] in ('queued', 'running'):
+ # loop exhausted time_out while the job was still active
+ error = 'job completion exceeded expected timer of: %s seconds' % \
+ self.parameters['time_out']
+ else:
+ # failure: prefer the completion message, fall back to progress
+ if results['job-completion'] is not None:
+ error = results['job-completion']
+ else:
+ error = results['job-progress']
+ return error
+
+ def check_invoke_result(self, result, action):
+ '''
+ check invoked api call back result.
+
+ Async ZAPIs return a result-status and optionally a result-jobid;
+ when the job is in progress, wait for it (unless time_out is 0).
+ Fails the module on job error or failed status.
+ :param action: verb used in error messages (e.g. 'delete', 'resize').
+ '''
+ results = dict()
+ for key in ('result-status', 'result-jobid'):
+ if result.get_child_by_name(key):
+ results[key] = result[key]
+ status = results.get('result-status')
+ if status == 'in_progress' and 'result-jobid' in results:
+ if self.parameters['time_out'] == 0:
+ # user asked not to wait for job completion
+ return
+ error = self.check_job_status(results['result-jobid'])
+ if error is None:
+ return
+ else:
+ self.module.fail_json(msg='Error when %s volume: %s' % (action, error))
+ if status == 'failed':
+ self.module.fail_json(msg='Operation failed when %s volume.' % action)
+
+ def set_efficiency_attributes(self, options):
+ '''Fill options with the sis (efficiency) attributes the user set,
+ translating module parameter names to ZAPI names via
+ self.sis_keys2zapi_set and converting booleans to 'true'/'false'.
+ :param options: dict of ZAPI options, updated in place.
+ '''
+ for key, attr in self.sis_keys2zapi_set.items():
+ value = self.parameters.get(key)
+ if value is not None:
+ if self.argument_spec[key]['type'] == 'bool':
+ value = self.na_helper.get_value_for_bool(False, value)
+ options[attr] = value
+ # ZAPI requires compression to be set for inline-compression
+ if options.get('enable-inline-compression') == 'true' and 'enable-compression' not in options:
+ options['enable-compression'] = 'true'
+
+ def set_efficiency_config(self):
+ '''Set efficiency policy and compression attributes (sync variant).
+
+ First enables sis on the volume path, then applies the configured
+ efficiency attributes with sis-set-config.
+ '''
+ options = {'path': '/vol/' + self.parameters['name']}
+ efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable', **options)
+ try:
+ self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 40043 denotes an Operation has already been enabled.
+ if to_native(error.code) == "40043":
+ pass
+ else:
+ self.module.fail_json(msg='Error enable efficiency on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ self.set_efficiency_attributes(options)
+ efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config', **options)
+ try:
+ self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def set_efficiency_config_async(self):
+ """Set efficiency policy and compression attributes in asynchronous
+ mode (used for FlexGroups and Infinite Volumes); each async call
+ returns a job which is waited on via check_invoke_result."""
+ options = {'volume-name': self.parameters['name']}
+ efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable-async', **options)
+ try:
+ result = self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error enable efficiency on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ self.check_invoke_result(result, 'enable efficiency on')
+
+ self.set_efficiency_attributes(options)
+ efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config-async', **options)
+ try:
+ result = self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ self.check_invoke_result(result, 'set efficiency policy on')
+
+ def get_efficiency_info(self, return_value):
+ """
+ get the name of the efficiency policy assigned to volume, as well as compression values
+ if attribute does not exist, set its value to None
+ :param return_value: dict to update in place with one entry per key
+ in self.sis_keys2zapi_get.
+ :return: update return_value dict.
+ """
+ sis_info = netapp_utils.zapi.NaElement('sis-get-iter')
+ sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
+ sis_status_info.add_new_child('path', '/vol/' + self.parameters['name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(sis_status_info)
+ sis_info.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(sis_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching efficiency policy for volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ # default every key to None so callers can rely on their presence
+ for key in self.sis_keys2zapi_get:
+ return_value[key] = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ sis_attributes = result.get_child_by_name('attributes-list'). get_child_by_name('sis-status-info')
+ for key, attr in self.sis_keys2zapi_get.items():
+ value = sis_attributes.get_child_content(attr)
+ if self.argument_spec[key]['type'] == 'bool':
+ value = self.na_helper.get_value_for_bool(True, value)
+ return_value[key] = value
+
+ def modify_volume_efficiency_config(self, efficiency_config_modify_value):
+ '''Apply efficiency settings, choosing the async path for
+ FlexGroups/Infinite Volumes ('async') and sync otherwise.
+ :param efficiency_config_modify_value: 'async' or any other value for sync.
+ '''
+ if efficiency_config_modify_value == 'async':
+ self.set_efficiency_config_async()
+ else:
+ self.set_efficiency_config()
+
+ def set_snapshot_auto_delete(self):
+ '''Apply each snapshot_auto_delete option with a separate
+ snapshot-autodelete-set-option call (the ZAPI takes one
+ option-name/option-value pair at a time).
+ '''
+ options = {'volume': self.parameters['name']}
+ desired_options = self.parameters['snapshot_auto_delete']
+ for key, value in desired_options.items():
+ options['option-name'] = key
+ options['option-value'] = str(value)
+ snapshot_auto_delete = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-autodelete-set-option', **options)
+ try:
+ self.server.invoke_successfully(snapshot_auto_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting snapshot auto delete options for volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rehost_volume(self):
+ '''Rehost the volume from from_vserver to vserver (cluster-scope
+ operation), optionally remapping or force-unmapping LUNs.
+ '''
+ volume_rehost = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rehost', **{'vserver': self.parameters['from_vserver'],
+ 'destination-vserver': self.parameters['vserver'],
+ 'volume': self.parameters['name']})
+ if self.parameters.get('auto_remap_luns') is not None:
+ volume_rehost.add_new_child('auto-remap-luns', str(self.parameters['auto_remap_luns']))
+ if self.parameters.get('force_unmap_luns') is not None:
+ volume_rehost.add_new_child('force-unmap-luns', str(self.parameters['force_unmap_luns']))
+ try:
+ self.cluster.invoke_successfully(volume_rehost, enable_tunneling=True)
+ self.ems_log_event("volume-rehost")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error rehosting volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapshot_restore_volume(self):
+ '''Restore the volume from the snapshot named in snapshot_restore,
+ optionally forcing the restore and/or preserving LUN ids.
+ '''
+ snapshot_restore = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapshot-restore-volume', **{'snapshot': self.parameters['snapshot_restore'],
+ 'volume': self.parameters['name']})
+ if self.parameters.get('force_restore') is not None:
+ snapshot_restore.add_new_child('force', str(self.parameters['force_restore']))
+ if self.parameters.get('preserve_lun_ids') is not None:
+ snapshot_restore.add_new_child('preserve-lun-ids', str(self.parameters['preserve_lun_ids']))
+ try:
+ self.server.invoke_successfully(snapshot_restore, enable_tunneling=True)
+ self.ems_log_event("snapshot-restore-volume")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error restoring volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def adjust_size(self, current, after_create):
+ """
+ ignore small change in size by resetting expectations
+
+ ONTAP may report a slightly different size than requested (e.g. due
+ to snapshot reserve); overwrite self.parameters['size'] with the
+ current size so get_modified_attributes does not see a change.
+ :param after_create: True right after a create - any size delta is ignored.
+ """
+ if after_create:
+ # ignore change in size immediately after a create:
+ self.parameters['size'] = current['size']
+ elif self.parameters['size_change_threshold'] > 0:
+ if 'size' in current and self.parameters.get('size') is not None:
+ # ignore a less than XX% difference
+ if abs(current['size'] - self.parameters['size']) * 100 / current['size'] < self.parameters['size_change_threshold']:
+ self.parameters['size'] = current['size']
+
+ def set_modify_dict(self, current, after_create=False):
+ '''Fill modify dict with changes.
+
+ :param current: current volume attributes (mutated: the
+ snapshot_auto_delete entry is popped off).
+ :param after_create: forwarded to adjust_size to suppress size noise
+ right after creation.
+ :return: dict of attributes to change (may include a nested
+ 'snapshot_auto_delete' dict).
+ '''
+ # snapshot_auto_delete's value is a dict, get_modified_attributes function doesn't support dict as value.
+ auto_delete_info = current.pop('snapshot_auto_delete', None)
+ # ignore small changes in size by adjusting self.parameters['size']
+ self.adjust_size(current, after_create)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if modify is not None and 'type' in modify:
+ self.module.fail_json(msg="Changing the same volume from one type to another is not allowed.")
+ if self.parameters.get('snapshot_auto_delete') is not None:
+ auto_delete_modify = self.na_helper.get_modified_attributes(auto_delete_info,
+ self.parameters['snapshot_auto_delete'])
+ if len(auto_delete_modify) > 0:
+ modify['snapshot_auto_delete'] = auto_delete_modify
+ return modify
+
+ def take_modify_actions(self, modify):
+ '''Apply a modify dict: regular volume attributes first, then any
+ efficiency (sis) settings via the sync or async path.
+ :param modify: dict of attributes to change.
+ '''
+ if modify.get('is_online'):
+ # when moving to online, include parameters that get does not return when volume is offline
+ for field in ['volume_security_style', 'group_id', 'user_id', 'percent_snapshot_space']:
+ if self.parameters.get(field) is not None:
+ modify[field] = self.parameters[field]
+ self.modify_volume(modify)
+
+ if any([modify.get(key) is not None for key in self.sis_keys2zapi_get]):
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexGroup':
+ efficiency_config_modify = 'async'
+ else:
+ efficiency_config_modify = 'sync'
+ self.modify_volume_efficiency_config(efficiency_config_modify)
+
+ def apply(self):
+ '''Call create/modify/delete operations.
+
+ Decides on exactly one primary action (rename, rehost, snapshot
+ restore, create, delete, or modify), honors check_mode, and exits
+ via module.exit_json with changed/modify details.
+ '''
+ response = None
+ modify_after_create = None
+ current = self.get_volume()
+ self.volume_style = self.get_volume_style(current)
+ # rename and create are mutually exclusive
+ rename, rehost, snapshot_restore, cd_action, modify = None, None, None, None, None
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_volume(self.parameters['from_name']), current)
+ elif self.parameters.get('from_vserver'):
+ # rehost and snapshot restore are always reported as a change
+ rehost = True
+ self.na_helper.changed = True
+ elif self.parameters.get('snapshot_restore'):
+ snapshot_restore = True
+ self.na_helper.changed = True
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters.get('unix_permissions') is not None:
+ # current stores unix_permissions' numeric value.
+ # unix_permission in self.parameter can be either numeric or character.
+ if self.compare_chmod_value(current) or not self.parameters['is_online']:
+ # don't change if the values are the same
+ # can't change permissions if not online
+ del self.parameters['unix_permissions']
+ if cd_action is None and rename is None and rehost is None and self.parameters['state'] == 'present':
+ modify = self.set_modify_dict(current)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ # report the change without performing it
+ pass
+ else:
+ if rename:
+ self.rename_volume()
+ if rehost:
+ self.rehost_volume()
+ if snapshot_restore:
+ self.snapshot_restore_volume()
+ if cd_action == 'create':
+ response = self.create_volume()
+ # if we create using ZAPI and modify only options are set (snapdir_access or atime_update), we need to run a modify.
+ # The modify also takes care of efficiency (sis) parameters and snapshot_auto_delete.
+ # If we create using REST application, some options are not available, we may need to run a modify.
+ current = self.get_volume()
+ if current:
+ modify_after_create = self.set_modify_dict(current, after_create=True)
+ if modify_after_create:
+ self.take_modify_actions(modify_after_create)
+ # restore this, as set_modify_dict could set it to False
+ self.na_helper.changed = True
+ elif cd_action == 'delete':
+ self.delete_volume(current)
+ elif modify:
+ # uuid is needed by the REST resize path
+ self.parameters['uuid'] = current['uuid']
+ self.take_modify_actions(modify)
+
+ result = dict(
+ changed=self.na_helper.changed
+ )
+ if response is not None:
+ result['response'] = response
+ if modify:
+ result['modify'] = modify
+ if modify_after_create:
+ result['modify_after_create'] = modify_after_create
+ if self.warnings:
+ result['warnings'] = self.warnings
+ self.module.exit_json(**result)
+
+ def ems_log_event(self, state):
+ '''Autosupport log event'''
+ if state == 'create':
+ message = "A Volume has been created, size: " + \
+ str(self.parameters['size']) + str(self.parameters['size_unit'])
+ elif state == 'volume-delete':
+ message = "A Volume has been deleted"
+ elif state == 'volume-move':
+ message = "A Volume has been moved"
+ elif state == 'volume-rename':
+ message = "A Volume has been renamed"
+ elif state == 'volume-resize':
+ message = "A Volume has been resized to: " + \
+ str(self.parameters['size']) + str(self.parameters['size_unit'])
+ elif state == 'volume-rehost':
+ message = "A Volume has been rehosted"
+ elif state == 'snapshot-restore-volume':
+ message = "A Volume has been restored by snapshot"
+ elif state == 'volume-change':
+ message = "A Volume state has been changed"
+ else:
+ message = "na_ontap_volume has been called"
+ netapp_utils.ems_log_event(
+ "na_ontap_volume", self.server, event=message)
+
+
+def main():
+ '''Apply volume operations from playbook.'''
+ obj = NetAppOntapVolume()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py
new file mode 100644
index 00000000..b3433133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_volume_autosize
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_volume_autosize
+short_description: NetApp ONTAP manage volume autosize
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify Volume AutoSize
+options:
+ volume:
+ description:
+ - The name of the flexible volume for which we want to set autosize.
+ type: str
+ required: true
+
+ mode:
+ description:
+ - Specify the flexible volume's autosize mode of operation.
+ type: str
+ choices: ['grow', 'grow_shrink', 'off']
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ grow_threshold_percent:
+ description:
+ - Specifies the percentage of the flexible volume's capacity at which autogrow is initiated.
+ - The default grow threshold varies from 85% to 98%, depending on the volume size.
+ - It is an error for the grow threshold to be less than or equal to the shrink threshold.
+ - Range between 0 and 100
+ type: int
+
+ increment_size:
+ description:
+ - Specify the flexible volume's increment size using the following format < number > [k|m|g|t]
+ - The amount is the absolute size to set.
+ - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ type: str
+
+ maximum_size:
+ description:
+ - Specify the flexible volume's maximum allowed size using the following format < number > [k|m|g|t]
+ - The amount is the absolute size to set.
+ - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ - The default value is 20% greater than the volume size at the time autosize was enabled.
+ - It is an error for the maximum volume size to be less than the current volume size.
+ - It is also an error for the maximum size to be less than or equal to the minimum size.
+ type: str
+
+ minimum_size:
+ description:
+ - Specify the flexible volume's minimum allowed size using the following format < number > [k|m|g|t] The amount is the absolute size to set.
+ - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ - The default value is the size of the volume at the time the 'grow_shrink' mode was enabled.
+ - It is an error for the minimum size to be greater than or equal to the maximum size.
+ type: str
+
+ reset:
+ description:
+ - "Sets the values of maximum_size, increment_size, minimum_size, grow_threshold_percent, shrink_threshold_percent and mode to their defaults"
+    - If the reset parameter is present, the system will always perform the reset action, so idempotency is not supported.
+ type: bool
+
+ shrink_threshold_percent:
+ description:
+ - Specifies the percentage of the flexible volume's capacity at which autoshrink is initiated.
+    - The default shrink threshold is 50%. It is an error for the shrink threshold to be greater than or equal to the grow threshold.
+ - Range between 0 and 100
+ type: int
+'''
+
+EXAMPLES = """
+ - name: Modify volume autosize
+ na_ontap_volume_autosize:
+ hostname: 10.193.79.189
+ username: admin
+ password: netapp1!
+ volume: ansibleVolumesize12
+ mode: grow
+ grow_threshold_percent: 99
+ increment_size: 50m
+ maximum_size: 10g
+ minimum_size: 21m
+ shrink_threshold_percent: 40
+ vserver: ansible_vserver
+
+ - name: Reset volume autosize
+ na_ontap_volume_autosize:
+ hostname: 10.193.79.189
+ username: admin
+ password: netapp1!
+ volume: ansibleVolumesize12
+ reset: true
+ vserver: ansible_vserver
+"""
+
+RETURN = """
+"""
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVolumeAutosize(object):
+ ''' volume autosize configuration '''
+ def __init__(self):
+ self.use_rest = False
+ # Volume_autosize returns KB and not B like Volume so values are shifted down 1
+ self._size_unit_map = dict(
+ k=1,
+ m=1024,
+ g=1024 ** 2,
+ t=1024 ** 3,
+ )
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ volume=dict(required=True, type="str"),
+ mode=dict(required=False, choices=['grow', 'grow_shrink', 'off']),
+ vserver=dict(required=True, type='str'),
+ grow_threshold_percent=dict(required=False, type='int'),
+ increment_size=dict(required=False, type='str'),
+ maximum_size=dict(required=False, type='str'),
+ minimum_size=dict(required=False, type='str'),
+ reset=dict(required=False, type='bool'),
+ shrink_threshold_percent=dict(required=False, type='int')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['reset', 'maximum_size'],
+ ['reset', 'increment_size'],
+ ['reset', 'minimum_size'],
+ ['reset', 'grow_threshold_percent'],
+ ['reset', 'shrink_threshold_percent'],
+ ['reset', 'mode']
+ ]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # API should be used for ONTAP 9.6 or higher, ZAPI for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ # increment size and reset are not supported with rest api
+ if self.parameters.get('increment_size'):
+ self.module.fail_json(msg="Rest API does not support increment size, please switch to ZAPI")
+ if self.parameters.get('reset'):
+ self.module.fail_json(msg="Rest API does not support reset, please switch to ZAPI")
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_volume_autosize(self, uuid=None):
+ """
+ Get volume_autosize information from the ONTAP system
+ :return:
+ """
+ if self.use_rest:
+ params = {'fields': 'autosize'}
+ api = 'storage/volumes/' + uuid
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ return self._create_get_volume_return(message['autosize'])
+ else:
+ volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-get')
+ volume_autosize_info.add_new_child('volume', self.parameters['volume'])
+ try:
+ result = self.server.invoke_successfully(volume_autosize_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching volume autosize infor for %s : %s' % (self.parameters['volume'],
+ to_native(error)),
+ exception=traceback.format_exc())
+ return self._create_get_volume_return(result)
+
+ def _create_get_volume_return(self, results):
+ """
+ Create a return value from volume-autosize-get info file
+ :param results:
+ :return:
+ """
+ return_value = {}
+ if self.use_rest:
+ if 'mode' in results:
+ return_value['mode'] = results['mode']
+ if 'grow_threshold' in results:
+ return_value['grow_threshold_percent'] = results['grow_threshold']
+ if 'maximum' in results:
+ return_value['maximum_size'] = results['maximum']
+ if 'minimum' in results:
+ return_value['minimum_size'] = results['minimum']
+ if 'shrink_threshold' in results:
+ return_value['shrink_threshold_percent'] = results['shrink_threshold']
+ else:
+ if results.get_child_by_name('mode'):
+ return_value['mode'] = results.get_child_content('mode')
+ if results.get_child_by_name('grow-threshold-percent'):
+ return_value['grow_threshold_percent'] = int(results.get_child_content('grow-threshold-percent'))
+ if results.get_child_by_name('increment-size'):
+ return_value['increment_size'] = results.get_child_content('increment-size')
+ if results.get_child_by_name('maximum-size'):
+ return_value['maximum_size'] = results.get_child_content('maximum-size')
+ if results.get_child_by_name('minimum-size'):
+ return_value['minimum_size'] = results.get_child_content('minimum-size')
+ if results.get_child_by_name('shrink-threshold-percent'):
+ return_value['shrink_threshold_percent'] = int(results.get_child_content('shrink-threshold-percent'))
+ if return_value == {}:
+ return_value = None
+ return return_value
+
+ def modify_volume_autosize(self, uuid=None):
+ """
+ Modify a Volumes autosize
+ :return:
+ """
+ if self.use_rest:
+ params = {}
+ data = {}
+ autosize = {}
+ if self.parameters.get('mode'):
+ autosize['mode'] = self.parameters['mode']
+ if self.parameters.get('grow_threshold_percent'):
+ autosize['grow_threshold'] = self.parameters['grow_threshold_percent']
+ if self.parameters.get('maximum_size'):
+ autosize['maximum'] = self.parameters['maximum_size']
+ if self.parameters.get('minimum_size'):
+ autosize['minimum'] = self.parameters['minimum_size']
+ if self.parameters.get('shrink_threshold_percent'):
+ autosize['shrink_threshold'] = self.parameters['shrink_threshold_percent']
+ data['autosize'] = autosize
+ api = "storage/volumes/" + uuid
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+
+ else:
+ volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-set')
+ volume_autosize_info.add_new_child('volume', self.parameters['volume'])
+ if self.parameters.get('mode'):
+ volume_autosize_info.add_new_child('mode', self.parameters['mode'])
+ if self.parameters.get('grow_threshold_percent'):
+ volume_autosize_info.add_new_child('grow-threshold-percent', str(self.parameters['grow_threshold_percent']))
+ if self.parameters.get('increment_size'):
+ volume_autosize_info.add_new_child('increment-size', self.parameters['increment_size'])
+ if self.parameters.get('reset') is not None:
+ volume_autosize_info.add_new_child('reset', str(self.parameters['reset']))
+ if self.parameters.get('maximum_size'):
+ volume_autosize_info.add_new_child('maximum-size', self.parameters['maximum_size'])
+ if self.parameters.get('minimum_size'):
+ volume_autosize_info.add_new_child('minimum-size', self.parameters['minimum_size'])
+ if self.parameters.get('shrink_threshold_percent'):
+ volume_autosize_info.add_new_child('shrink-threshold-percent', str(self.parameters['shrink_threshold_percent']))
+ try:
+ self.server.invoke_successfully(volume_autosize_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modify volume autosize for %s: %s" % (self.parameters["volume"], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_to_kb(self, converted_parameters):
+ """
+ Save a coverted parameter
+ :param converted_parameters: Dic of all parameters
+ :return:
+ """
+ for attr in ['maximum_size', 'minimum_size', 'increment_size']:
+ if converted_parameters.get(attr):
+ if self.use_rest:
+ converted_parameters[attr] = self.convert_to_byte(attr, converted_parameters)
+ else:
+ converted_parameters[attr] = str(self.convert_to_kb(attr, converted_parameters))
+ return converted_parameters
+
+ def convert_to_kb(self, variable, converted_parameters):
+ """
+ Convert a number 10m in to its correct KB size
+ :param variable: the Parameter we are going to covert
+ :param converted_parameters: Dic of all parameters
+ :return:
+ """
+ if converted_parameters.get(variable)[-1] not in ['k', 'm', 'g', 't']:
+ self.module.fail_json(msg="%s must end with a k, m, g or t" % variable)
+ return self._size_unit_map[converted_parameters.get(variable)[-1]] * int(converted_parameters.get(variable)[:-1])
+
+ def convert_to_byte(self, variable, converted_parameters):
+ if converted_parameters.get(variable)[-1] not in ['k', 'm', 'g', 't']:
+ self.module.fail_json(msg="%s must end with a k, m, g or t" % variable)
+ return (self._size_unit_map[converted_parameters.get(variable)[-1]] * int(converted_parameters.get(variable)[:-1])) * 1024
+
+ def get_volume_uuid(self):
+ """
+ Get a volume's UUID
+ :return: uuid of the volume
+ """
+ params = {'fields': '*',
+ 'name': self.parameters['volume'],
+ 'svm.name': self.parameters['vserver']}
+ api = "storage/volumes"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ return message['records'][0]['uuid']
+
+ def apply(self):
+ # TODO Logging for rest
+ uuid = None
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_volume_autosize", self.server)
+ if self.use_rest:
+ # we only have the volume name, we need to the the uuid for the volume
+ uuid = self.get_volume_uuid()
+ current = self.get_volume_autosize(uuid=uuid)
+ converted_parameters = copy.deepcopy(self.parameters)
+ converted_parameters = self.modify_to_kb(converted_parameters)
+ self.na_helper.get_modified_attributes(current, converted_parameters)
+ if self.parameters.get('reset') is True:
+ self.na_helper.changed = True
+ if self.na_helper.changed:
+ if not self.module.check_mode:
+ self.modify_volume_autosize(uuid=uuid)
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Apply volume autosize operations from playbook
+ :return:
+ """
+ obj = NetAppOntapVolumeAutosize()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py
new file mode 100644
index 00000000..f0a0ef43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_volume_clone
+short_description: NetApp ONTAP manage volume clones.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create NetApp ONTAP volume clones.
+- A FlexClone License is required to use this module
+options:
+ state:
+ description:
+ - Whether volume clone should be created.
+ choices: ['present']
+ type: str
+ default: 'present'
+ parent_volume:
+ description:
+ - The parent volume of the volume clone being created.
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the volume clone being created.
+ required: true
+ type: str
+ aliases:
+ - volume
+ vserver:
+ description:
+ - Vserver in which the volume clone should be created.
+ required: true
+ type: str
+ parent_snapshot:
+ description:
+ - Parent snapshot in which volume clone is created off.
+ type: str
+ parent_vserver:
+ description:
+ - Vserver of parent volume in which clone is created off.
+ type: str
+ qos_policy_group_name:
+ description:
+ - The qos-policy-group-name which should be set for volume clone.
+ type: str
+ space_reserve:
+ description:
+ - The space_reserve setting which should be used for the volume clone.
+ choices: ['volume', 'none']
+ type: str
+ volume_type:
+ description:
+ - The volume-type setting which should be used for the volume clone.
+ choices: ['rw', 'dp']
+ type: str
+ junction_path:
+ version_added: 2.8.0
+ description:
+ - Junction path of the volume.
+ type: str
+ uid:
+ version_added: 2.9.0
+ description:
+ - The UNIX user ID for the clone volume.
+ type: int
+ gid:
+ version_added: 2.9.0
+ description:
+ - The UNIX group ID for the clone volume.
+ type: int
+ split:
+ version_added: '20.2.0'
+ description:
+ - Split clone volume from parent volume.
+ type: bool
+'''
+
+EXAMPLES = """
+ - name: create volume clone
+ na_ontap_volume_clone:
+ state: present
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ vserver: vs_hack
+ parent_volume: normal_volume
+ name: clone_volume_7
+ space_reserve: none
+ parent_snapshot: backup1
+ junction_path: /clone_volume_7
+ uid: 1
+ gid: 1
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPVolumeClone(object):
+ """
+ Creates a volume clone
+ """
+
+ def __init__(self):
+ """
+ Initialize the NetAppOntapVolumeClone class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ parent_volume=dict(required=True, type='str'),
+ name=dict(required=True, type='str', aliases=["volume"]),
+ vserver=dict(required=True, type='str'),
+ parent_snapshot=dict(required=False, type='str', default=None),
+ parent_vserver=dict(required=False, type='str', default=None),
+ qos_policy_group_name=dict(required=False, type='str', default=None),
+ space_reserve=dict(required=False, type='str', choices=['volume', 'none'], default=None),
+ volume_type=dict(required=False, type='str', choices=['rw', 'dp']),
+ junction_path=dict(required=False, type='str', default=None),
+ uid=dict(required=False, type='int'),
+ gid=dict(required=False, type='int'),
+ split=dict(required=False, type='bool', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['uid', 'gid']
+ ]
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ if self.parameters.get('parent_vserver'):
+ # use cluster ZAPI, as vserver ZAPI does not support parent-vserser for create
+ self.create_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # keep vserver for ems log and clone-get
+ self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ else:
+ self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ self.create_server = self.vserver
+ return
+
+ def create_volume_clone(self):
+ """
+ Creates a new volume clone
+ """
+ clone_obj = netapp_utils.zapi.NaElement('volume-clone-create')
+ clone_obj.add_new_child("parent-volume", self.parameters['parent_volume'])
+ clone_obj.add_new_child("volume", self.parameters['name'])
+ if self.parameters.get('qos_policy_group_name'):
+ clone_obj.add_new_child("qos-policy-group-name", self.parameters['qos_policy_group_name'])
+ if self.parameters.get('space_reserve'):
+ clone_obj.add_new_child("space-reserve", self.parameters['space_reserve'])
+ if self.parameters.get('parent_snapshot'):
+ clone_obj.add_new_child("parent-snapshot", self.parameters['parent_snapshot'])
+ if self.parameters.get('parent_vserver'):
+ clone_obj.add_new_child("parent-vserver", self.parameters['parent_vserver'])
+ clone_obj.add_new_child("vserver", self.parameters['vserver'])
+ if self.parameters.get('volume_type'):
+ clone_obj.add_new_child("volume-type", self.parameters['volume_type'])
+ if self.parameters.get('junction_path'):
+ clone_obj.add_new_child("junction-path", self.parameters['junction_path'])
+ if self.parameters.get('uid'):
+ clone_obj.add_new_child("uid", str(self.parameters['uid']))
+ clone_obj.add_new_child("gid", str(self.parameters['gid']))
+ try:
+ self.create_server.invoke_successfully(clone_obj, True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error creating volume clone: %s: %s' %
+ (self.parameters['name'], to_native(exc)), exception=traceback.format_exc())
+ if 'split' in self.parameters and self.parameters['split']:
+ self.start_volume_clone_split()
+
+ def modify_volume_clone(self):
+ """
+ Modify an existing volume clone
+ """
+ if 'split' in self.parameters and self.parameters['split']:
+ self.start_volume_clone_split()
+
+ def start_volume_clone_split(self):
+ """
+ Starts a volume clone split
+ """
+ clone_obj = netapp_utils.zapi.NaElement('volume-clone-split-start')
+ clone_obj.add_new_child("volume", self.parameters['name'])
+ try:
+ self.vserver.invoke_successfully(clone_obj, True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error starting volume clone split: %s: %s' %
+ (self.parameters['name'], to_native(exc)), exception=traceback.format_exc())
+
+ def get_volume_clone(self):
+ clone_obj = netapp_utils.zapi.NaElement('volume-clone-get')
+ clone_obj.add_new_child("volume", self.parameters['name'])
+ current = None
+ try:
+ results = self.vserver.invoke_successfully(clone_obj, True)
+ if results.get_child_by_name('attributes'):
+ attributes = results.get_child_by_name('attributes')
+ info = attributes.get_child_by_name('volume-clone-info')
+ current = {}
+ # Check if clone is currently splitting. Whilst a split is in
+ # progress, these attributes are present in 'volume-clone-info':
+ # block-percentage-complete, blocks-scanned & blocks-updated.
+ if info.get_child_by_name('block-percentage-complete') or \
+ info.get_child_by_name('blocks-scanned') or \
+ info.get_child_by_name('blocks-updated'):
+ current["split"] = True
+ else:
+ # Clone hasn't been split.
+ current["split"] = False
+ return current
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 15661 denotes a volume clone not being found.
+ if to_native(error.code) == "15661":
+ pass
+ else:
+ self.module.fail_json(msg='Error fetching volume clone information %s: %s' %
+ (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+ return None
+
+ def apply(self):
+ """
+ Run Module based on playbook
+ """
+ netapp_utils.ems_log_event("na_ontap_volume_clone", self.vserver)
+ current = self.get_volume_clone()
+ modify = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_volume_clone()
+ if modify:
+ self.modify_volume_clone()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap Volume Clone object and runs the correct play task
+ """
+ obj = NetAppONTAPVolumeClone()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py
new file mode 100644
index 00000000..084c6fe4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_volume_snaplock
+
+short_description: NetApp ONTAP manage volume snaplock retention.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modifies the snaplock retention of volumes on NetApp ONTAP.
+options:
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ default_retention_period:
+ description:
+ - Specifies the default retention period that will be applied.
+ - The format is "<number> <units>" for example "10 days", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ - If this option is specified as "max", then maximum_retention_period will be used as the default retention period.
+ type: str
+
+ autocommit_period:
+ description:
+ - Specifies the autocommit-period for the snaplock volume.
+ - The format is "<number> <units>" for example "8 hours", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+ is_volume_append_mode_enabled:
+ description:
+ - Specifies if the volume append mode must be enabled or disabled.
+ - It can be modified only when the volume is not mounted and does not have any data or Snapshot copy.
+ - Volume append mode is not supported on SnapLock audit log volumes.
+ - When it is enabled, all files created with write permissions on the volume will be WORM appendable files by default.
+ - All WORM appendable files not modified for a period greater than the autocommit period of the volume are also committed to WORM read-only state.
+ type: bool
+
+ maximum_retention_period:
+ description:
+ - Specifies the allowed maximum retention period that will be applied.
+ - The format is "<number> <units>" for example "2 years", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+ minimum_retention_period:
+ description:
+ - Specifies the allowed minimum retention period that will be applied.
+ - The format is "<number> <units>" for example "1 days", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Set volume snaplock
+ na_ontap_volume_snaplock:
+ vserver: svm
+ name: ansibleVolume
+ default_retention_period: "5 days"
+ minimum_retention_period: "0 years"
+ maximum_retention_period: "10 days"
+ is_volume_append_mode_enabled: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVolumeSnaplock(object):
+ '''Class with volume operations'''
+
+ def __init__(self):
+ '''Initialize module parameters'''
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ default_retention_period=dict(required=False, type='str'),
+ maximum_retention_period=dict(required=False, type='str'),
+ minimum_retention_period=dict(required=False, type='str'),
+ autocommit_period=dict(required=False, type='str'),
+ is_volume_append_mode_enabled=dict(required=False, type='bool'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_volume_snaplock_attrs(self):
+ """
+ Return volume-get-snaplock-attrs query results
+ :param vol_name: name of the volume
+ :return: dict of the volume snaplock attrs
+ """
+ volume_snaplock = netapp_utils.zapi.NaElement('volume-get-snaplock-attrs')
+ volume_snaplock.add_new_child('volume', self.parameters['name'])
+
+ try:
+ result = self.server.invoke_successfully(volume_snaplock, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snaplock attributes for volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return_value = None
+
+ if result.get_child_by_name('snaplock-attrs'):
+ volume_snaplock_attributes = result['snaplock-attrs']['snaplock-attrs-info']
+ return_value = {
+ 'autocommit_period': volume_snaplock_attributes['autocommit-period'],
+ 'default_retention_period': volume_snaplock_attributes['default-retention-period'],
+ 'is_volume_append_mode_enabled': self.na_helper.get_value_for_bool(True, volume_snaplock_attributes['is-volume-append-mode-enabled']),
+ 'maximum_retention_period': volume_snaplock_attributes['maximum-retention-period'],
+ 'minimum_retention_period': volume_snaplock_attributes['minimum-retention-period'],
+ }
+ return return_value
+
+ def set_volume_snaplock_attrs(self, modify):
+ '''Set ONTAP volume snaplock attributes'''
+ volume_snaplock_obj = netapp_utils.zapi.NaElement('volume-set-snaplock-attrs')
+ volume_snaplock_obj.add_new_child('volume', self.parameters['name'])
+ if modify.get('autocommit_period') is not None:
+ volume_snaplock_obj.add_new_child('autocommit-period', self.parameters['autocommit_period'])
+ if modify.get('default_retention_period') is not None:
+ volume_snaplock_obj.add_new_child('default-retention-period', self.parameters['default_retention_period'])
+ if modify.get('is_volume_append_mode_enabled') is not None:
+ volume_snaplock_obj.add_new_child('is-volume-append-mode-enabled',
+ self.na_helper.get_value_for_bool(False, self.parameters['is_volume_append_mode_enabled']))
+ if modify.get('maximum_retention_period') is not None:
+ volume_snaplock_obj.add_new_child('maximum-retention-period', self.parameters['maximum_retention_period'])
+ if modify.get('minimum_retention_period') is not None:
+ volume_snaplock_obj.add_new_child('minimum-retention-period', self.parameters['minimum_retention_period'])
+ try:
+ self.server.invoke_successfully(volume_snaplock_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting snaplock attributes for volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_volume_snaplock", self.server)
+ current, modify = self.get_volume_snaplock_attrs(), None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.set_volume_snaplock_attrs(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Set volume snaplock attributes from playbook'''
+ obj = NetAppOntapVolumeSnaplock()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py
new file mode 100644
index 00000000..4ac35fce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_vscan
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan
+short_description: NetApp ONTAP Vscan enable/disable.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+notes:
+- on demand task, on_access_policy and scanner_pools must be set up before running this module
+description:
+- Enable and Disable Vscan
+options:
+ enable:
+ description:
+    - Whether to enable or disable Vscan
+ type: bool
+ default: True
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Enable Vscan
+ na_ontap_vscan:
+ enable: True
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: trident_svm
+
+ - name: Disable Vscan
+ na_ontap_vscan:
+ enable: False
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: trident_svm
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscan(object):
+    ''' enable/disable vscan '''
+    def __init__(self):
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            enable=dict(type='bool', default=True),
+            vserver=dict(required=True, type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # API should be used for ONTAP 9.6 or higher, Zapi for lower version
+        self.rest_api = OntapRestAPI(self.module)
+        if self.rest_api.is_rest():
+            self.use_rest = True
+        else:
+            # ZAPI fallback: NetApp-Lib is only required on this code path.
+            if HAS_NETAPP_LIB is False:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_vscan(self):
+        '''Return the current Vscan status for the configured vserver.
+
+        REST: returns the first record (dict with 'svm' and 'enabled' keys)
+        from GET protocols/vscan filtered on svm.name.
+        NOTE(review): assumes at least one record is returned; an empty
+        'records' list would raise IndexError here - confirm upstream.
+        ZAPI: returns the vscan-status-info NaElement, or None (implicitly)
+        when no record matches.
+        '''
+        if self.use_rest:
+            params = {'fields': 'svm,enabled',
+                      "svm.name": self.parameters['vserver']}
+            api = "protocols/vscan"
+            message, error = self.rest_api.get(api, params)
+            if error:
+                self.module.fail_json(msg=error)
+            return message['records'][0]
+        else:
+            vscan_status_iter = netapp_utils.zapi.NaElement('vscan-status-get-iter')
+            vscan_status_info = netapp_utils.zapi.NaElement('vscan-status-info')
+            vscan_status_info.add_new_child('vserver', self.parameters['vserver'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(vscan_status_info)
+            vscan_status_iter.add_child_elem(query)
+            try:
+                result = self.server.invoke_successfully(vscan_status_iter, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error getting Vscan info for Vserver %s: %s' %
+                                          (self.parameters['vserver'], to_native(error)),
+                                      exception=traceback.format_exc())
+            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+                return result.get_child_by_name('attributes-list').get_child_by_name('vscan-status-info')
+
+    def enable_vscan(self, uuid=None):
+        '''Enable or disable Vscan according to the 'enable' parameter.
+
+        :param uuid: SVM uuid; used (and required) only on the REST path.
+        '''
+        if self.use_rest:
+            params = {"svm.name": self.parameters['vserver']}
+            data = {"enabled": self.parameters['enable']}
+            api = "protocols/vscan/" + uuid
+            dummy, error = self.rest_api.patch(api, data, params)
+            if error is not None:
+                self.module.fail_json(msg=error)
+        else:
+            vscan_status_obj = netapp_utils.zapi.NaElement("vscan-status-modify")
+            vscan_status_obj.add_new_child('is-vscan-enabled', str(self.parameters['enable']))
+            try:
+                self.server.invoke_successfully(vscan_status_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg="Error Enable/Disabling Vscan: %s" % to_native(error), exception=traceback.format_exc())
+
+    def asup_log(self):
+        '''Best-effort autosupport logging; only implemented for the ZAPI path.'''
+        if self.use_rest:
+            # TODO: logging for Rest
+            return
+        else:
+            # Either we are using ZAPI, or REST failed when it should not
+            try:
+                netapp_utils.ems_log_event("na_ontap_vscan", self.server)
+            except Exception:
+                # TODO: we may fail to connect to REST or ZAPI, the line below shows REST issues only
+                # self.module.fail_json(msg=repr(self.rest_api.errors), log=repr(self.rest_api.debug_logs))
+                pass
+
+    def apply(self):
+        '''Compare current and requested Vscan state; update when they differ (honors check mode).'''
+        changed = False
+        self.asup_log()
+        current = self.get_vscan()
+        if self.use_rest:
+            if current['enabled'] != self.parameters['enable']:
+                if not self.module.check_mode:
+                    self.enable_vscan(current['svm']['uuid'])
+                changed = True
+        else:
+            # ZAPI reports booleans as 'true'/'false' strings, hence the lower() comparison.
+            if current.get_child_content('is-vscan-enabled') != str(self.parameters['enable']).lower():
+                if not self.module.check_mode:
+                    self.enable_vscan()
+                changed = True
+        self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscan()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py
new file mode 100644
index 00000000..b523ae8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_on_access_policy
+short_description: NetApp ONTAP Vscan on access policy configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure on access policy for Vscan (virus scan)
+options:
+ state:
+ description:
+ - Whether a Vscan on Access policy is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ policy_name:
+ description:
+ - The name of the policy
+ required: true
+ type: str
+
+ file_ext_to_exclude:
+ description:
+ - File extensions for which On-Access scanning must not be performed.
+ type: list
+ elements: str
+
+ file_ext_to_include:
+ description:
+ - File extensions for which On-Access scanning is considered. The default value is '*', which means that all files are considered for scanning except
+ - those which are excluded from scanning.
+ type: list
+ elements: str
+
+ filters:
+ description:
+ - A list of filters which can be used to define the scope of the On-Access policy more precisely. The filters can be added in any order. Possible values
+ - scan_ro_volume Enable scans for read-only volume,
+ - scan_execute_access Scan only files opened with execute-access (CIFS only)
+ type: list
+ elements: str
+
+ is_scan_mandatory:
+ description:
+ - Specifies whether access to a file is allowed if there are no external virus-scanning servers available for virus scanning. It is true if not provided at
+ the time of creating a policy.
+ type: bool
+
+ max_file_size:
+ description:
+ - Max file-size (in bytes) allowed for scanning. The default value of 2147483648 (2GB) is taken if not provided at the time of creating a policy.
+ type: int
+
+ paths_to_exclude:
+ description:
+ - File paths for which On-Access scanning must not be performed.
+ type: list
+ elements: str
+
+ scan_files_with_no_ext:
+ description:
+ - Specifies whether files without any extension are considered for scanning or not.
+ default: true
+ type: bool
+
+ policy_status:
+ description:
+ - Status for the created policy
+ default: false
+ type: bool
+ version_added: 20.8.0
+'''
+
+EXAMPLES = """
+ - name: Create Vscan On Access Policy
+ na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml']
+ - name: Create Vscan On Access Policy with Policy Status enabled
+ na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml']
+ policy_status: True
+ - name: modify Vscan on Access Policy
+ na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml', 'py']
+ - name: Delete On Access Policy
+ na_ontap_vscan_on_access_policy:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanOnAccessPolicy(object):
+ """
+ Create/Modify/Delete a Vscan OnAccess policy
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ file_ext_to_exclude=dict(required=False, type='list', elements='str'),
+ file_ext_to_include=dict(required=False, type='list', elements='str'),
+ filters=dict(required=False, type='list', elements='str'),
+ is_scan_mandatory=dict(required=False, type='bool', default=False),
+ max_file_size=dict(required=False, type="int"),
+ paths_to_exclude=dict(required=False, type='list', elements='str'),
+ scan_files_with_no_ext=dict(required=False, type='bool', default=True),
+ policy_status=dict(required=False, type='bool')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ self.state = parameters['state']
+ self.vserver = parameters['vserver']
+ self.policy_name = parameters['policy_name']
+ self.file_ext_to_exclude = parameters['file_ext_to_exclude']
+ self.file_ext_to_include = parameters['file_ext_to_include']
+ self.filters = parameters['filters']
+ self.is_scan_mandatory = parameters['is_scan_mandatory']
+ self.max_file_size = parameters['max_file_size']
+ self.paths_to_exclude = parameters['paths_to_exclude']
+ self.scan_files_with_no_ext = parameters['scan_files_with_no_ext']
+ self.policy_status = parameters['policy_status']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def exists_access_policy(self, policy_obj=None):
+ """
+ Check if a Vscan Access policy exists
+ :return: True if Exist, False if it does not
+ """
+ if policy_obj is None:
+ policy_obj = self.return_on_access_policy()
+ if policy_obj:
+ return True
+ else:
+ return False
+
+ def return_on_access_policy(self):
+ """
+ Return a Vscan on Access Policy
+ :return: None if there is no access policy, return the policy if there is
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-get-iter')
+ access_policy_info = netapp_utils.zapi.NaElement('vscan-on-access-policy-info')
+ access_policy_info.add_new_child('policy-name', self.policy_name)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(access_policy_info)
+ access_policy_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records'):
+ if int(result.get_child_content('num-records')) == 1:
+ return result
+ elif int(result.get_child_content('num-records')) > 1:
+ self.module.fail_json(msg='Mutiple Vscan on Access Policy matching %s:' % self.policy_name)
+ return None
+
+ def create_on_access_policy(self):
+ """
+ Create a Vscan on Access policy
+ :return: none
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-create')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ access_policy_obj.add_new_child('protocol', 'cifs')
+ access_policy_obj = self._fill_in_access_policy(access_policy_obj)
+
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def status_modify_on_access_policy(self):
+ """
+ Update the status of policy
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-status-modify')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ access_policy_obj.add_new_child('policy-status', str(self.policy_status).lower())
+
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying status Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def delete_on_access_policy(self):
+ """
+ Delete a Vscan On Access Policy
+ :return:
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-delete')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Deleting Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def modify_on_access_policy(self):
+ """
+ Modify a Vscan On Access policy
+ :return: nothing
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-modify')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ access_policy_obj = self._fill_in_access_policy(access_policy_obj)
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Modifying Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def _fill_in_access_policy(self, access_policy_obj):
+ if self.is_scan_mandatory is not None:
+ access_policy_obj.add_new_child('is-scan-mandatory', str(self.is_scan_mandatory).lower())
+ if self.max_file_size:
+ access_policy_obj.add_new_child('max-file-size', str(self.max_file_size))
+ if self.scan_files_with_no_ext is not None:
+ access_policy_obj.add_new_child('scan-files-with-no-ext', str(self.scan_files_with_no_ext))
+ if self.file_ext_to_exclude:
+ ext_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude')
+ access_policy_obj.add_child_elem(ext_obj)
+ for extension in self.file_ext_to_exclude:
+ ext_obj.add_new_child('file-extension', extension)
+ if self.file_ext_to_include:
+ ext_obj = netapp_utils.zapi.NaElement('file-ext-to-include')
+ access_policy_obj.add_child_elem(ext_obj)
+ for extension in self.file_ext_to_include:
+ ext_obj.add_new_child('file-extension', extension)
+ if self.filters:
+ ui_filter_obj = netapp_utils.zapi.NaElement('filters')
+ access_policy_obj.add_child_elem(ui_filter_obj)
+ for filter in self.filters:
+ ui_filter_obj.add_new_child('vscan-on-access-policy-ui-filter', filter)
+ if self.paths_to_exclude:
+ path_obj = netapp_utils.zapi.NaElement('paths-to-exclude')
+ access_policy_obj.add_child_elem(path_obj)
+ for path in self.paths_to_exclude:
+ path_obj.add_new_child('file-path', path)
+ return access_policy_obj
+
+ def has_policy_changed(self):
+ results = self.return_on_access_policy()
+ if results is None:
+ return False
+ try:
+ policy_obj = results.get_child_by_name('attributes-list').get_child_by_name('vscan-on-access-policy-info')
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Accessing on access policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+ if self.is_scan_mandatory is not None:
+ if str(self.is_scan_mandatory).lower() != policy_obj.get_child_content('is-scan-mandatory'):
+ return True
+ if self.policy_status is not None:
+ if str(self.policy_status).lower() != policy_obj.get_child_content('is-policy-enabled'):
+ return True
+ if self.max_file_size:
+ if self.max_file_size != int(policy_obj.get_child_content('max-file-size')):
+ return True
+ if self.scan_files_with_no_ext is not None:
+ if str(self.scan_files_with_no_ext).lower() != policy_obj.get_child_content('scan-files-with-no-ext'):
+ return True
+ if self.file_ext_to_exclude:
+ # if no file-ext-to-exclude are given at creation, XML will not have a file-ext-to-exclude
+ if policy_obj.get_child_by_name('file-ext-to-exclude') is None:
+ return True
+ current_to_exclude = []
+ for each in policy_obj.get_child_by_name('file-ext-to-exclude').get_children():
+ current_to_exclude.append(each.get_content())
+ k = self._diff(self.file_ext_to_exclude, current_to_exclude)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ if self.file_ext_to_include:
+ # if no file-ext-to-include are given at creation, XML will not have a file-ext-to-include
+ if policy_obj.get_child_by_name('file-ext-to-include') is None:
+ return True
+ current_to_include = []
+ for each in policy_obj.get_child_by_name('file-ext-to-include').get_children():
+ current_to_include.append(each.get_content())
+ k = self._diff(self.file_ext_to_include, current_to_include)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ if self.filters:
+ if policy_obj.get_child_by_name('filters') is None:
+ return True
+ current_filters = []
+ for each in policy_obj.get_child_by_name('filters').get_children():
+ current_filters.append(each.get_content())
+ k = self._diff(self.filters, current_filters)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ if self.paths_to_exclude:
+ if policy_obj.get_child_by_name('paths-to-exclude') is None:
+ return True
+ current_paths_to_exlude = []
+ for each in policy_obj.get_child_by_name('paths-to-exclude').get_children():
+ current_paths_to_exlude.append(each.get_content())
+ k = self._diff(self.paths_to_exclude, current_paths_to_exlude)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ return False
+
+ def _diff(self, li1, li2):
+ """
+ :param li1: list 1
+ :param li2: list 2
+ :return: a list contain items that are not on both lists
+ """
+ li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]
+ return li_dif
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_vscan_on_access_policy", self.server)
+ changed = False
+ policy_obj = self.return_on_access_policy()
+ if self.state == 'present':
+ if not self.exists_access_policy(policy_obj):
+ if not self.module.check_mode:
+ self.create_on_access_policy()
+ if self.policy_status:
+ self.status_modify_on_access_policy()
+ changed = True
+ else:
+ # Check if anything has changed first.
+ if self.has_policy_changed():
+ if not self.module.check_mode:
+ result = policy_obj.get_child_by_name('attributes-list').get_child_by_name('vscan-on-access-policy-info')
+ if str(self.policy_status).lower() != result.get_child_content('is-policy-enabled'):
+ if self.policy_status is not None:
+ self.status_modify_on_access_policy()
+ self.modify_on_access_policy()
+ changed = True
+ if self.state == 'absent':
+ if self.exists_access_policy(policy_obj):
+ if not self.module.check_mode:
+ result = policy_obj.get_child_by_name('attributes-list').get_child_by_name('vscan-on-access-policy-info')
+ if result.get_child_content('is-policy-enabled') == 'true':
+ self.status_modify_on_access_policy()
+ self.delete_on_access_policy()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanOnAccessPolicy()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py
new file mode 100644
index 00000000..80c4401f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_on_demand_task
+short_description: NetApp ONTAP Vscan on demand task configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure on demand task for Vscan
+options:
+ state:
+ description:
+ - Whether a Vscan on demand task is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ cross_junction:
+ description:
+ - Specifies whether the On-Demand task is allowed to cross volume junctions
+ type: bool
+ default: False
+
+ directory_recursion:
+ description:
+ - Specifies whether the On-Demand task is allowed to recursively scan through sub-directories.
+ type: bool
+ default: False
+
+ file_ext_to_exclude:
+ description:
+ - File-Extensions for which scanning must not be performed.
+ - File whose extension matches with both inclusion and exclusion list is not considered for scanning.
+ type: list
+ elements: str
+
+ file_ext_to_include:
+ description:
+ - File extensions for which scanning is considered.
+ - The default value is '*', which means that all files are considered for scanning except those which are excluded from scanning.
+ - File whose extension matches with both inclusion and exclusion list is not considered for scanning.
+ type: list
+ elements: str
+
+ max_file_size:
+ description:
+ - Max file-size (in bytes) allowed for scanning. The default value of 10737418240 (10GB) is taken if not provided at the time of creating a task.
+ type: str
+
+ paths_to_exclude:
+ description:
+ - File-paths for which scanning must not be performed.
+ type: list
+ elements: str
+
+ report_directory:
+ description:
+ - Path from the vserver root where task report is created. The path must be a directory and provided in unix-format from the root of the Vserver.
+ - Example /vol1/on-demand-reports.
+ type: str
+
+ report_log_level:
+ description:
+ - Log level for the On-Demand report.
+ choices: ['verbose', 'info', 'error']
+ type: str
+ default: error
+
+ request_timeout:
+ description:
+ - Total request-service time-limit in seconds. If the virus-scanner does not respond within the provided time, scan will be timedout.
+ type: str
+
+ scan_files_with_no_ext:
+ description:
+ - Specifies whether files without any extension are considered for scanning or not.
+ type: bool
+ default: True
+
+ scan_paths:
+ description:
+ - List of paths that need to be scanned. The path must be provided in unix-format and from the root of the Vserver.
+ - Example /vol1/large_files.
+ type: list
+ elements: str
+
+ scan_priority:
+ description:
+ - Priority of the On-Demand scan requests generated by this task.
+ choices: ['low', 'normal']
+ type: str
+ default: low
+
+ schedule:
+ description:
+ - Schedule of the task. The task will be run as per the schedule.
+ - For running the task immediately, vscan-on-demand-task-run api must be used after creating a task.
+ type: str
+
+ task_name:
+ description:
+ - Name of the task.
+ type: str
+ required: True
+'''
+
+
+EXAMPLES = """
+ - name: Create Vscan On Demand Task
+ na_ontap_vscan_on_demand_task:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ task_name: carchiOnDemand
+ scan_paths: /
+ report_directory: /
+ file_ext_to_exclude: ['py', 'yml']
+ max_file_size: 10737418241
+ paths_to_exclude: ['/tmp', '/var']
+ report_log_level: info
+ request_timeout: 60
+
+ - name: Delete Vscan On Demand Task
+ na_ontap_vscan_on_demand_task:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ task_name: carchiOnDemand
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanOnDemandTask(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ cross_junction=dict(required=False, type='bool', default=False),
+ directory_recursion=dict(required=False, type='bool', default=False),
+ file_ext_to_exclude=dict(required=False, type='list', elements='str'),
+ file_ext_to_include=dict(required=False, type='list', elements='str'),
+ max_file_size=dict(required=False, type="str"),
+ paths_to_exclude=dict(required=False, type='list', elements='str'),
+ report_directory=dict(required=False, type='str'),
+ report_log_level=dict(required=False, type='str', choices=['verbose', 'info', 'error'], default='error'),
+ request_timeout=dict(required=False, type='str'),
+ scan_files_with_no_ext=dict(required=False, type='bool', default=True),
+ scan_paths=dict(required=False, type='list', elements='str'),
+ scan_priority=dict(required=False, type='str', choices=['low', 'normal'], default='low'),
+ schedule=dict(required=False, type="str"),
+ task_name=dict(required=True, type="str")
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ["state", "present", ["report_directory", "scan_paths"]]
+ ]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_demand_task(self):
+ """
+ Get a demand task
+ :return: A vscan-on-demand-task-info or None
+ """
+ demand_task_iter = netapp_utils.zapi.NaElement("vscan-on-demand-task-get-iter")
+ demand_task_info = netapp_utils.zapi.NaElement("vscan-on-demand-task-info")
+ demand_task_info.add_new_child('task-name', self.parameters['task_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(demand_task_info)
+ demand_task_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(demand_task_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching for Vscan on demand task %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return result.get_child_by_name('attributes-list').get_child_by_name('vscan-on-demand-task-info')
+ return None
+
+ def create_demand_task(self):
+ """
+ Create a Demand Task
+ :return: None
+ """
+ demand_task_obj = netapp_utils.zapi.NaElement("vscan-on-demand-task-create")
+ # Required items first
+ demand_task_obj.add_new_child('report-directory', self.parameters['report_directory'])
+ demand_task_obj.add_new_child('task-name', self.parameters['task_name'])
+ scan_paths = netapp_utils.zapi.NaElement("scan-paths")
+ for scan_path in self.parameters['scan_paths']:
+ scan_paths.add_new_child('string', scan_path)
+ demand_task_obj.add_child_elem(scan_paths)
+ # Optional items next
+ if self.parameters.get('cross_junction'):
+ demand_task_obj.add_new_child('cross-junction', str(self.parameters['cross_junction']).lower())
+ if self.parameters.get('directory_recursion'):
+ demand_task_obj.add_new_child('directory-recursion', str(self.parameters['directory_recursion']).lower())
+ if self.parameters.get('file_ext_to_exclude'):
+ ext_to_exclude_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude')
+ for exclude_file in self.parameters['file_ext_to_exclude']:
+ ext_to_exclude_obj.add_new_child('file-extension', exclude_file)
+ demand_task_obj.add_child_elem(ext_to_exclude_obj)
+ if self.parameters.get('file_ext_to_include'):
+ ext_to_include_obj = netapp_utils.zapi.NaElement('file-ext-to-include')
+ for include_file in self.parameters['file_ext_to_exclude']:
+ ext_to_include_obj.add_child_elem(include_file)
+ demand_task_obj.add_child_elem(ext_to_include_obj)
+ if self.parameters.get('max_file_size'):
+ demand_task_obj.add_new_child('max-file-size', self.parameters['max_file_size'])
+ if self.parameters.get('paths_to_exclude'):
+ exclude_paths = netapp_utils.zapi.NaElement('paths-to-exclude')
+ for path in self.parameters['paths_to_exclude']:
+ exclude_paths.add_new_child('string', path)
+ demand_task_obj.add_child_elem(exclude_paths)
+ if self.parameters.get('report_log_level'):
+ demand_task_obj.add_new_child('report-log-level', self.parameters['report_log_level'])
+ if self.parameters.get('request_timeout'):
+ demand_task_obj.add_new_child('request-timeout', self.parameters['request_timeout'])
+ if self.parameters.get('scan_files_with_no_ext'):
+ demand_task_obj.add_new_child('scan-files-with-no-ext', str(self.parameters['scan_files_with_no_ext']).lower())
+ if self.parameters.get('scan_priority'):
+ demand_task_obj.add_new_child('scan-priority', self.parameters['scan_priority'].lower())
+ if self.parameters.get('schedule'):
+ demand_task_obj.add_new_child('schedule', self.parameters['schedule'])
+ try:
+ self.server.invoke_successfully(demand_task_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating on demand task %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_demand_task(self):
+ """
+ Delete a Demand Task"
+ :return:
+ """
+ demand_task_obj = netapp_utils.zapi.NaElement('vscan-on-demand-task-delete')
+ demand_task_obj.add_new_child('task-name', self.parameters['task_name'])
+ try:
+ self.server.invoke_successfully(demand_task_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting on demand task, %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_vscan_on_demand_task")
+ current = self.get_demand_task()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_demand_task()
+ elif cd_action == 'delete':
+ self.delete_demand_task()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanOnDemandTask()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py
new file mode 100644
index 00000000..03919e68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_vscan_scanner_pool
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_scanner_pool
+short_description: NetApp ONTAP Vscan Scanner Pools Configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete a Vscan Scanner Pool
+options:
+ state:
+ description:
+ - Whether a Vscan Scanner pool is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ hostnames:
+ description:
+ - List of hostnames of Vscan servers which are allowed to connect to Data ONTAP
+ type: list
+ elements: str
+
+ privileged_users:
+ description:
+ - List of privileged usernames. Username must be in the form "domain-name\\user-name"
+ type: list
+ elements: str
+
+ scanner_pool:
+ description:
+ - the name of the virus scanner pool
+ required: true
+ type: str
+
+ scanner_policy:
+ description:
+ - The name of the Virus scanner Policy
+ choices: ['primary', 'secondary', 'idle']
+ type: str
+'''
+
+EXAMPLES = """
+- name: Create and enable Scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ hostnames: ['name', 'name2']
+ privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi']
+ scanner_pool: Scanner1
+ scanner_policy: primary
+
+- name: Modify scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ hostnames: ['name', 'name2', 'name3']
+ privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi', 'sim.rtp.openeng.netapp.com\\chuyic']
+ scanner_pool: Scanner1
+
+- name: Delete a scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ scanner_pool: Scanner1
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
# Cached once at import time; checked in __init__ to fail fast when NetApp-Lib is missing.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapVscanScannerPool(object):
    ''' Create, modify or delete a Vscan scanner pool on a data vserver. '''
    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            hostnames=dict(required=False, type='list', elements='str'),
            privileged_users=dict(required=False, type='list', elements='str'),
            scanner_pool=dict(required=True, type='str'),
            scanner_policy=dict(required=False, type='str', choices=['primary', 'secondary', 'idle'])
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def create_scanner_pool(self):
        """
        Create a Vscan Scanner Pool.
        :return: nothing
        """
        scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create')
        # hostnames and privileged_users are optional; use .get() because
        # unset options are not present in self.parameters at all.
        if self.parameters.get('hostnames'):
            string_obj = netapp_utils.zapi.NaElement('hostnames')
            scanner_pool_obj.add_child_elem(string_obj)
            for hostname in self.parameters['hostnames']:
                string_obj.add_new_child('string', hostname)
        if self.parameters.get('privileged_users'):
            users_obj = netapp_utils.zapi.NaElement('privileged-users')
            scanner_pool_obj.add_child_elem(users_obj)
            for user in self.parameters['privileged_users']:
                users_obj.add_new_child('privileged-user', user)
        scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
        try:
            self.server.invoke_successfully(scanner_pool_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # report the pool name (previously this wrongly reported scanner_policy)
            self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' %
                                  (self.parameters['scanner_pool'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply_policy(self):
        """
        Apply a scanner policy to the scanner pool.
        :return: nothing
        """
        apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy')
        apply_policy_obj.add_new_child('scanner-policy', self.parameters['scanner_policy'])
        apply_policy_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
        try:
            self.server.invoke_successfully(apply_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # fixed message: name both the policy and the pool (was policy twice, with a typo)
            self.module.fail_json(msg='Error applying policy %s to pool %s: %s' %
                                  (self.parameters['scanner_policy'], self.parameters['scanner_pool'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_scanner_pool(self):
        """
        Check whether the scanner pool exists.
        :return: dict of pool attributes if it exists, None otherwise
        """
        return_value = None
        if self.use_rest:
            # REST path not implemented; use_rest is always False today
            pass
        else:
            scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter')
            scanner_pool_info = netapp_utils.zapi.NaElement('vscan-scanner-pool-info')
            scanner_pool_info.add_new_child('scanner-pool', self.parameters['scanner_pool'])
            scanner_pool_info.add_new_child('vserver', self.parameters['vserver'])
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(scanner_pool_info)
            scanner_pool_obj.add_child_elem(query)
            try:
                result = self.server.invoke_successfully(scanner_pool_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' %
                                      (self.parameters['scanner_pool'], to_native(error)), exception=traceback.format_exc())
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content(
                        'scanner-pool') == self.parameters['scanner_pool']:
                    scanner_pool_obj = result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info')
                    # the hostnames/privileged-users elements may be absent when the
                    # pool has none configured — guard against None to avoid a crash
                    hostnames_elem = scanner_pool_obj.get_child_by_name('hostnames')
                    hostnames = [host.get_content() for host in hostnames_elem.get_children()] if hostnames_elem else []
                    users_elem = scanner_pool_obj.get_child_by_name('privileged-users')
                    privileged_users = [user.get_content() for user in users_elem.get_children()] if users_elem else []
                    return_value = {
                        'hostnames': hostnames,
                        'enable': scanner_pool_obj.get_child_content('is-currently-active'),
                        'privileged_users': privileged_users,
                        'scanner_pool': scanner_pool_obj.get_child_content('scanner-pool'),
                        'scanner_policy': scanner_pool_obj.get_child_content('scanner-policy')
                    }
        return return_value

    def delete_scanner_pool(self):
        """
        Delete the scanner pool.
        :return: nothing
        """
        scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete')
        scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
        try:
            self.server.invoke_successfully(scanner_pool_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' %
                                  (self.parameters['scanner_pool'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_scanner_pool(self, modify):
        """
        Modify the scanner pool.
        :param modify: dict of attributes to change, as computed by get_modified_attributes
        :return: nothing
        """
        vscan_pool_modify = netapp_utils.zapi.NaElement('vscan-scanner-pool-modify')
        vscan_pool_modify.add_new_child('scanner-pool', self.parameters['scanner_pool'])
        for key in modify:
            if key == 'privileged_users':
                users_obj = netapp_utils.zapi.NaElement('privileged-users')
                vscan_pool_modify.add_child_elem(users_obj)
                for user in modify['privileged_users']:
                    users_obj.add_new_child('privileged-user', user)
            elif key == 'hostnames':
                string_obj = netapp_utils.zapi.NaElement('hostnames')
                vscan_pool_modify.add_child_elem(string_obj)
                for hostname in modify['hostnames']:
                    string_obj.add_new_child('string', hostname)
            elif key != 'scanner_policy':
                # scanner_policy is not part of the modify ZAPI; it is applied via apply_policy()
                vscan_pool_modify.add_new_child(self.attribute_to_name(key), str(modify[key]))

        try:
            self.server.invoke_successfully(vscan_pool_modify, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying Vscan Scanner Pool %s: %s' %
                                  (self.parameters['scanner_pool'], to_native(error)),
                                  exception=traceback.format_exc())

    @staticmethod
    def attribute_to_name(attribute):
        """Map a module option name to its ZAPI element name (underscores to dashes)."""
        return str.replace(attribute, '_', '-')

    def asup_log_for_cserver(self, event_name):
        """
        Fetch the admin vserver for the cluster and emit an AutoSupport
        log event with the given module name.
        :param event_name: Name of the event log
        :return: None
        """
        if self.use_rest:
            # TODO: logging for REST
            return
        else:
            results = netapp_utils.get_cserver(self.server)
            cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
            netapp_utils.ems_log_event(event_name, cserver)

    def apply(self):
        """Create/delete/modify the pool as needed, honoring check mode."""
        self.asup_log_for_cserver("na_ontap_vscan_scanner_pool")
        current = self.get_scanner_pool()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # initialize so the elif below is safe even when no modify was computed
        modify = None
        if self.parameters['state'] == 'present' and cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_scanner_pool()
                if self.parameters.get('scanner_policy') is not None:
                    self.apply_policy()
            elif cd_action == 'delete':
                self.delete_scanner_pool()
            elif modify:
                self.modify_scanner_pool(modify)
                if self.parameters.get('scanner_policy') is not None:
                    self.apply_policy()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Module entry point: build the scanner pool object and run it."""
    NetAppOntapVscanScannerPool().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py
new file mode 100644
index 00000000..b7e3c67c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_vserver_cifs_security
+short_description: NetApp ONTAP vserver CIFS security modification
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - modify vserver CIFS security.
+
+options:
+
+ vserver:
+ description:
+ - name of the vserver.
+ required: true
+ type: str
+
+ kerberos_clock_skew:
+ description:
+ - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock.
+ type: int
+
+ kerberos_ticket_age:
+ description:
+ - Determine the maximum amount of time in hours that a user's ticket may be used for the purpose of Kerberos authentication.
+ type: int
+
+ kerberos_renew_age:
+ description:
+ - Determine the maximum amount of time in days for which a ticket can be renewed.
+ type: int
+
+ kerberos_kdc_timeout:
+ description:
+ - Determine the timeout value in seconds for KDC connections.
+ type: int
+
+ is_signing_required:
+ description:
+ - Determine whether signing is required for incoming CIFS traffic.
+ type: bool
+
+ is_password_complexity_required:
+ description:
+ - Determine whether password complexity is required for local users.
+ type: bool
+
+ is_aes_encryption_enabled:
+ description:
+ - Determine whether AES-128 and AES-256 encryption mechanisms are enabled for Kerberos-related CIFS communication.
+ type: bool
+
+ is_smb_encryption_required:
+ description:
+ - Determine whether SMB encryption is required for incoming CIFS traffic.
+ type: bool
+
+ lm_compatibility_level:
+ description:
+ - Determine the LM compatibility level.
+ choices: ['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']
+ type: str
+
+ referral_enabled_for_ad_ldap:
+ description:
+ - Determine whether LDAP referral chasing is enabled or not for AD LDAP connections.
+ type: bool
+
+ session_security_for_ad_ldap:
+ description:
+ - Determine the level of security required for LDAP communications.
+ choices: ['none', 'sign', 'seal']
+ type: str
+
+ smb1_enabled_for_dc_connections:
+ description:
+ - Determine if SMB version 1 is used for connections to domain controllers.
+ choices: ['false', 'true', 'system_default']
+ type: str
+
+ smb2_enabled_for_dc_connections:
+ description:
+ - Determine if SMB version 2 is used for connections to domain controllers.
+ choices: ['false', 'true', 'system_default']
+ type: str
+
+ use_start_tls_for_ad_ldap:
+ description:
+ - Determine whether to use start_tls for AD LDAP connections.
+ type: bool
+
+'''
+
+EXAMPLES = '''
+ - name: modify cifs security
+ na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ is_aes_encryption_enabled: false
+ lm_compatibility_level: lm_ntlm_ntlmv2_krb
+ smb1_enabled_for_dc_connections: system_default
+ smb2_enabled_for_dc_connections: system_default
+ use_start_tls_for_ad_ldap: false
+ referral_enabled_for_ad_ldap: false
+ session_security_for_ad_ldap: none
+ is_signing_required: false
+ is_password_complexity_required: false
+
+ - name: modify cifs security is_smb_encryption_required
+ na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ is_smb_encryption_required: false
+
+ - name: modify cifs security int options
+ na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ kerberos_clock_skew: 10
+ kerberos_ticket_age: 10
+ kerberos_renew_age: 5
+ kerberos_kdc_timeout: 3
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
# Cached once at import time; checked in __init__ to fail fast when NetApp-Lib is missing.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPCifsSecurity(object):
    '''
    Modify vserver CIFS security settings via ZAPI.
    '''
    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str'),
            kerberos_clock_skew=dict(required=False, type='int'),
            kerberos_ticket_age=dict(required=False, type='int'),
            kerberos_renew_age=dict(required=False, type='int'),
            kerberos_kdc_timeout=dict(required=False, type='int'),
            is_signing_required=dict(required=False, type='bool'),
            is_password_complexity_required=dict(required=False, type='bool'),
            is_aes_encryption_enabled=dict(required=False, type='bool'),
            is_smb_encryption_required=dict(required=False, type='bool'),
            # type='str' added explicitly (was implicit) to silence Ansible's
            # missing-type warning; behavior for callers is unchanged.
            lm_compatibility_level=dict(required=False, type='str', choices=['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']),
            referral_enabled_for_ad_ldap=dict(required=False, type='bool'),
            session_security_for_ad_ldap=dict(required=False, type='str', choices=['none', 'sign', 'seal']),
            smb1_enabled_for_dc_connections=dict(required=False, type='str', choices=['false', 'true', 'system_default']),
            smb2_enabled_for_dc_connections=dict(required=False, type='str', choices=['false', 'true', 'system_default']),
            use_start_tls_for_ad_ldap=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.set_playbook_zapi_key_map()

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def set_playbook_zapi_key_map(self):
        """Populate the option-name -> ZAPI-element-name maps, split by value type."""
        self.na_helper.zapi_int_keys = {
            'kerberos_clock_skew': 'kerberos-clock-skew',
            'kerberos_ticket_age': 'kerberos-ticket-age',
            'kerberos_renew_age': 'kerberos-renew-age',
            'kerberos_kdc_timeout': 'kerberos-kdc-timeout'
        }
        self.na_helper.zapi_bool_keys = {
            'is_signing_required': 'is-signing-required',
            'is_password_complexity_required': 'is-password-complexity-required',
            'is_aes_encryption_enabled': 'is-aes-encryption-enabled',
            'is_smb_encryption_required': 'is-smb-encryption-required',
            'referral_enabled_for_ad_ldap': 'referral-enabled-for-ad-ldap',
            'use_start_tls_for_ad_ldap': 'use-start-tls-for-ad-ldap'
        }
        self.na_helper.zapi_str_keys = {
            'lm_compatibility_level': 'lm-compatibility-level',
            'session_security_for_ad_ldap': 'session-security-for-ad-ldap',
            'smb1_enabled_for_dc_connections': 'smb1-enabled-for-dc-connections',
            'smb2_enabled_for_dc_connections': 'smb2-enabled-for-dc-connections'
        }

    def cifs_security_get_iter(self):
        """
        Get the vserver's current CIFS security settings.
        :return: dict of settings (None-valued when not reported), or None if no record
        """
        cifs_security_get = netapp_utils.zapi.NaElement('cifs-security-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        cifs_security = netapp_utils.zapi.NaElement('cifs-security')
        cifs_security.add_new_child('vserver', self.parameters['vserver'])
        query.add_child_elem(cifs_security)
        cifs_security_get.add_child_elem(query)
        cifs_security_details = dict()
        try:
            result = self.server.invoke_successfully(cifs_security_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cifs security from %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
            cifs_security_info = result.get_child_by_name('attributes-list').get_child_by_name('cifs-security')
            # convert ZAPI string payloads back to the python types used in self.parameters
            for option, zapi_key in self.na_helper.zapi_int_keys.items():
                cifs_security_details[option] = self.na_helper.get_value_for_int(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key))
            for option, zapi_key in self.na_helper.zapi_bool_keys.items():
                cifs_security_details[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key))
            for option, zapi_key in self.na_helper.zapi_str_keys.items():
                if cifs_security_info.get_child_content(zapi_key) is None:
                    cifs_security_details[option] = None
                else:
                    cifs_security_details[option] = cifs_security_info.get_child_content(zapi_key)
            return cifs_security_details
        return None

    def cifs_security_modify(self, modify):
        """
        Modify the CIFS security options listed in modify.
        :param modify: dict of attributes to modify
        :return: None
        """
        cifs_security_modify = netapp_utils.zapi.NaElement('cifs-security-modify')
        for attribute in modify:
            if attribute in self.na_helper.zapi_bool_keys:
                # ZAPI booleans must be 'true'/'false'; str(True) would send 'True'
                value = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters[attribute])
            else:
                value = str(self.parameters[attribute])
            cifs_security_modify.add_new_child(self.attribute_to_name(attribute), value)
        try:
            self.server.invoke_successfully(cifs_security_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying cifs security on %s: %s'
                                  % (self.parameters['vserver'], to_native(e)),
                                  exception=traceback.format_exc())

    @staticmethod
    def attribute_to_name(attribute):
        """Map a module option name to its ZAPI element name (underscores to dashes)."""
        return str.replace(attribute, '_', '-')

    def apply(self):
        """Call modify operations, honoring check mode."""
        self.asup_log_for_cserver("na_ontap_vserver_cifs_security")
        current = self.cifs_security_get_iter()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if modify:
                    self.cifs_security_modify(modify)
        self.module.exit_json(changed=self.na_helper.changed)

    def asup_log_for_cserver(self, event_name):
        """
        Fetch the admin vserver for the cluster and emit an AutoSupport
        log event with the given module name.
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)
+
+
def main():
    """Module entry point: build the CIFS security object and run it."""
    NetAppONTAPCifsSecurity().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py
new file mode 100644
index 00000000..90a4c077
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete vserver peer
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_vserver_peer
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - Whether the specified vserver peer should exist or not.
+ default: present
+ vserver:
+ description:
+ - Specifies name of the source Vserver in the relationship.
+ required: true
+ type: str
+ applications:
+ type: list
+ elements: str
+ description:
+ - List of applications which can make use of the peering relationship.
+ - FlexCache supported from ONTAP 9.5 onwards.
+ peer_vserver:
+ description:
+ - Specifies name of the peer Vserver in the relationship.
+ required: true
+ type: str
+ peer_cluster:
+ description:
+ - Specifies name of the peer Cluster.
+ - Required for creating the vserver peer relationship with a remote cluster
+ type: str
+ dest_hostname:
+ description:
+ - Destination hostname or IP address.
+ - Required for creating the vserver peer relationship with a remote cluster
+ type: str
+ dest_username:
+ description:
+ - Destination username.
+ - Optional if this is same as source username.
+ type: str
+ dest_password:
+ description:
+ - Destination password.
+ - Optional if this is same as source password.
+ type: str
+short_description: NetApp ONTAP Vserver peering
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: Source vserver peer create
+ na_ontap_vserver_peer:
+ state: present
+ peer_vserver: ansible2
+ peer_cluster: ansibleCluster
+ vserver: ansible
+ applications: ['snapmirror']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ dest_hostname: "{{ netapp_dest_hostname }}"
+
+ - name: vserver peer delete
+ na_ontap_vserver_peer:
+ state: absent
+ peer_vserver: ansible2
+ vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
# Cached once at import time; checked in __init__ to fail fast when NetApp-Lib is missing.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPVserverPeer(object):
    """
    Create, accept or delete a vserver peer relationship (source and,
    when remote, destination cluster) using ZAPI.
    """

    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            peer_vserver=dict(required=True, type='str'),
            peer_cluster=dict(required=False, type='str'),
            applications=dict(required=False, type='list', elements='str'),
            dest_hostname=dict(required=False, type='str'),
            dest_username=dict(required=False, type='str'),
            dest_password=dict(required=False, type='str', no_log=True)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # source cluster connection, built from the original credentials
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            # temporarily overwrite module.params with the dest_* credentials so
            # setup_na_ontap_zapi builds a destination connection; self.parameters
            # still holds the original source values. NOTE: statement order here
            # matters — do not reorder.
            if self.parameters.get('dest_hostname'):
                self.module.params['hostname'] = self.parameters['dest_hostname']
            if self.parameters.get('dest_username'):
                self.module.params['username'] = self.parameters['dest_username']
            if self.parameters.get('dest_password'):
                self.module.params['password'] = self.parameters['dest_password']
            # when no dest_* option is given, this is simply a second connection
            # to the source cluster
            self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            # reset to source host connection for asup logs
            self.module.params['hostname'] = self.parameters['hostname']
            self.module.params['username'] = self.parameters['username']
            self.module.params['password'] = self.parameters['password']

    def vserver_peer_get_iter(self):
        """
        Compose NaElement object to query current vserver using peer-vserver and vserver parameters
        :return: NaElement object for vserver-peer-get-iter with query
        """
        vserver_peer_get = netapp_utils.zapi.NaElement('vserver-peer-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        vserver_peer_info = netapp_utils.zapi.NaElement('vserver-peer-info')
        vserver_peer_info.add_new_child('peer-vserver', self.parameters['peer_vserver'])
        vserver_peer_info.add_new_child('vserver', self.parameters['vserver'])
        query.add_child_elem(vserver_peer_info)
        vserver_peer_get.add_child_elem(query)
        return vserver_peer_get

    def vserver_peer_get(self):
        """
        Get current vserver peer info
        :return: Dictionary of current vserver peer details if query successful, else return None
        """
        vserver_peer_get_iter = self.vserver_peer_get_iter()
        vserver_info = dict()
        try:
            result = self.server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching vserver peer %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())
        # return vserver peer details
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 0:
            vserver_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info')
            vserver_info['peer_vserver'] = vserver_peer_info.get_child_content('peer-vserver')
            vserver_info['vserver'] = vserver_peer_info.get_child_content('vserver')
            vserver_info['peer_state'] = vserver_peer_info.get_child_content('peer-state')
            return vserver_info
        return None

    def vserver_peer_delete(self):
        """
        Delete a vserver peer
        """
        vserver_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'vserver-peer-delete', **{'peer-vserver': self.parameters['peer_vserver'],
                                      'vserver': self.parameters['vserver']})
        try:
            self.server.invoke_successfully(vserver_peer_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting vserver peer %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_peer_cluster_name(self):
        """
        Get local cluster name (queried on the source connection; used as the
        default for peer_cluster when the option is not supplied).
        :return: cluster name
        """
        cluster_info = netapp_utils.zapi.NaElement('cluster-identity-get')
        try:
            result = self.server.invoke_successfully(cluster_info, enable_tunneling=True)
            return result.get_child_by_name('attributes').get_child_by_name(
                'cluster-identity-info').get_child_content('cluster-name')
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
                                  % (self.parameters['peer_vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def vserver_peer_create(self):
        """
        Create a vserver peer
        """
        # applications is mandatory for create even though optional in the spec
        if self.parameters.get('applications') is None:
            self.module.fail_json(msg='applications parameter is missing')
        if self.parameters.get('peer_cluster') is not None and self.parameters.get('dest_hostname') is None:
            self.module.fail_json(msg='dest_hostname is required for peering a vserver in remote cluster')
        if self.parameters.get('peer_cluster') is None:
            self.parameters['peer_cluster'] = self.get_peer_cluster_name()
        vserver_peer_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'vserver-peer-create', **{'peer-vserver': self.parameters['peer_vserver'],
                                      'vserver': self.parameters['vserver'],
                                      'peer-cluster': self.parameters['peer_cluster']})
        applications = netapp_utils.zapi.NaElement('applications')
        for application in self.parameters['applications']:
            applications.add_new_child('vserver-peer-application', application)
        vserver_peer_create.add_child_elem(applications)
        try:
            self.server.invoke_successfully(vserver_peer_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating vserver peer %s: %s'
                                  % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def is_remote_peer(self):
        # remote when a dest_hostname was given and it differs from the source host
        if self.parameters.get('dest_hostname') is None or \
                (self.parameters['dest_hostname'] == self.parameters['hostname']):
            return False
        return True

    def vserver_peer_accept(self):
        """
        Accept a vserver peer at destination
        """
        # peer-vserver -> remote (source vserver is provided)
        # vserver -> local (destination vserver is provided)
        vserver_peer_accept = netapp_utils.zapi.NaElement.create_node_with_children(
            'vserver-peer-accept', **{'peer-vserver': self.parameters['vserver'],
                                      'vserver': self.parameters['peer_vserver']})
        try:
            # issued against the destination cluster connection
            self.dest_server.invoke_successfully(vserver_peer_accept, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error accepting vserver peer %s: %s'
                                  % (self.parameters['peer_vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)

    def apply(self):
        """
        Apply action to create/delete or accept vserver peer
        """
        self.asup_log_for_cserver("na_ontap_vserver_peer")
        current = self.vserver_peer_get()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed:
            if not self.module.check_mode:
                if cd_action == 'create':
                    self.vserver_peer_create()
                    # accept only if the peer relationship is on a remote cluster
                    if self.is_remote_peer():
                        self.vserver_peer_accept()
                elif cd_action == 'delete':
                    self.vserver_peer_delete()

        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Execute action"""
    NetAppONTAPVserverPeer().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py
new file mode 100644
index 00000000..c98c24f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Loop over an ONTAP get status request until a condition is satisfied.
+ - Report a timeout error if C(timeout) is exceeded while waiting for the condition.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_wait_for_condition
+short_description: NetApp ONTAP wait_for_condition. Loop over a get status request until a condition is met.
+version_added: 20.8.0
+options:
+ name:
+ description:
+ - The name of the event to check for.
+ choices: ['sp_upgrade', 'sp_version']
+ type: str
+ required: true
+ state:
+ description:
+ - whether the conditions should be present or absent.
+ - if C(present), the module exits when any of the conditions is observed.
+ - if C(absent), the module exits with success when None of the conditions is observed.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ conditions:
+ description:
+ - one or more conditions to match
+ - for instance C(is_in_progress) for C(sp_upgrade), C(firmware_version) for C(sp_version).
+ type: list
+ elements: str
+ required: true
+ polling_interval:
+ description:
+      - how often to check for the conditions, in seconds.
+ default: 5
+ type: int
+ timeout:
+ description:
+ - how long to wait for the conditions, in seconds.
+ default: 180
+ type: int
+ attributes:
+ description:
+ - a dictionary of custom attributes for the event.
+ - for instance, C(sp_upgrade), C(sp_version) require C(node).
+      - C(sp_version) requires C(expected_version).
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: wait for sp_upgrade in progress
+ na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_upgrade
+ conditions: is_in_progress
+ attributes:
+ node: "{{ node }}"
+ polling_interval: 30
+ timeout: 1800
+
+ - name: wait for sp_upgrade not in progress
+ na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_upgrade
+ conditions: is_in_progress
+ state: absent
+ attributes:
+ node: "{{ ontap_admin_ip }}"
+ polling_interval: 30
+ timeout: 1800
+
+ - name: wait for sp_version to match 3.9
+ na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_version
+ conditions: firmware_version
+ state: present
+ attributes:
+ node: "{{ ontap_admin_ip }}"
+ expected_version: 3.9
+ polling_interval: 30
+ timeout: 1800
+"""
+
+RETURN = """
+states:
+ description:
+ - summarized list of observed states while waiting for completion
+ - reported for success or timeout error
+ returned: always
+ type: str
+last_state:
+ description: last observed state for event
+ returned: always
+ type: str
+"""
+
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPWFC(object):
    ''' Poll a ZAPI until one of the requested conditions is matched (state=present),
        or until none of them is matched (state=absent), or until timeout.
    '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str', choices=['sp_upgrade', 'sp_version']),
            conditions=dict(required=True, type='list', elements='str'),
            polling_interval=dict(required=False, type='int', default=5),
            timeout=dict(required=False, type='int', default=180),
            attributes=dict(required=False, type='dict')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('name', 'sp_upgrade', ['attributes']),
                ('name', 'sp_version', ['attributes']),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # every observed value is recorded here, for reporting on exit or timeout
        self.states = list()

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)

        # supported resources:
        #   required_attributes: keys that must be present in parameters['attributes']
        #   conditions: condition name -> (ZAPI element name, expected value as a string)
        self.resource_configuration = dict(
            sp_upgrade=dict(
                required_attributes=['node'],
                conditions=dict(
                    is_in_progress=('is-in-progress', "true")
                )
            ),
            sp_version=dict(
                required_attributes=['node', 'expected_version'],
                conditions=dict(
                    firmware_version=('firmware-version', self.parameters['attributes'].get('expected_version'))
                )
            )
        )

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create an Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        try:
            netapp_utils.ems_log_event(event_name, cserver)
        except netapp_utils.zapi.NaApiError:
            # best effort - never fail the module because EMS logging failed
            pass

    def get_key_value(self, xml, key):
        """
        Depth-first search for the first element named C(key) in the XML tree.
        :return: the element content as a string, or None if not found.
        """
        # check the current element first; this also covers leaf elements with
        # no children, which the previous implementation never inspected, and
        # avoids re-checking the parent once per child.
        value = xml.get_child_content(key)
        if value is not None:
            return value
        for child in xml.get_children():
            value = self.get_key_value(child, key)
            if value is not None:
                return value
        return None

    def build_zapi(self, name):
        ''' build ZAPI request based on resource name '''
        if name == 'sp_upgrade':
            zapi_obj = netapp_utils.zapi.NaElement("service-processor-image-update-progress-get")
            zapi_obj.add_new_child('node', self.parameters['attributes']['node'])
            return zapi_obj
        if name == 'sp_version':
            zapi_obj = netapp_utils.zapi.NaElement("service-processor-get")
            zapi_obj.add_new_child('node', self.parameters['attributes']['node'])
            return zapi_obj
        # validate_resource() should make this unreachable
        raise KeyError(name)

    def extract_condition(self, name, results):
        ''' check if any of the conditions is present
            return:
                None, error if key is not found
                condition, None if a key is found with expected value
                None, None if every key does not match the expected values
        '''
        error = None
        for condition, (key, value) in self.resource_configuration[name]['conditions'].items():
            status = self.get_key_value(results, key)
            # record every observed value for the final states report
            self.states.append(str(status))
            if status == str(value):
                return condition, error
            if status is None:
                error = 'Cannot find element with name: %s in results: %s' % (key, results.to_string())
                return None, error
        # not found, or no match
        return None, None

    def get_condition(self, name, zapi_obj):
        ''' calls the ZAPI and extract condition value
            return:
                a condition summary string, None on a conclusive poll
                None, error on a transient ZAPI failure
                None, None when no conclusion can be drawn yet
        '''
        try:
            results = self.server.invoke_successfully(zapi_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            error = 'Error running command %s: %s' % (self.parameters['name'], to_native(error))
            return None, error

        condition, error = self.extract_condition(name, results)
        if error is not None:
            self.module.fail_json(msg='Error: %s' % error)
        if self.parameters['state'] == 'present':
            if condition in self.parameters['conditions']:
                return 'matched condition: %s' % condition, None
        else:
            if condition is None:
                return 'conditions not matched', None
            if condition not in self.parameters['conditions']:
                return 'conditions not matched: found other condition: %s' % condition, None
        return None, None

    def summarize_states(self):
        ''' replaces a long list of states with multipliers
            eg 'false'*5
            return:
                state_list as str
                last_state
        '''
        previous_state = None
        count = 0
        summary = ''
        for state in self.states:
            if state == previous_state:
                count += 1
            else:
                if previous_state is not None:
                    summary += '%s%s' % (previous_state, '' if count == 1 else '*%d' % count)
                count = 1
                previous_state = state
        if previous_state is not None:
            summary += '%s%s' % (previous_state, '' if count == 1 else '*%d' % count)
        last_state = self.states[-1] if self.states else ''
        return summary, last_state

    def wait_for_condition(self, name):
        ''' calls the ZAPI and extract condition value - loop until found '''
        time_left = self.parameters['timeout']
        max_consecutive_error_count = 3
        error_count = 0
        zapi_obj = self.build_zapi(name)

        while time_left > 0:
            condition, error = self.get_condition(name, zapi_obj)
            if error is not None:
                error_count += 1
                if error_count >= max_consecutive_error_count:
                    self.module.fail_json(msg='Error: %s - count: %d' % (error, error_count))
            else:
                # a successful poll resets the counter, so that only
                # *consecutive* errors are fatal (as the limit's name implies)
                error_count = 0
                if condition is not None:
                    return condition
            time.sleep(self.parameters['polling_interval'])
            time_left -= self.parameters['polling_interval']

        error = 'Error: timeout waiting for condition%s: %s.' %\
                ('s' if len(self.parameters['conditions']) > 1 else '',
                 ', '.join(self.parameters['conditions']))
        states, last_state = self.summarize_states()
        self.module.fail_json(msg=error, states=states, last_state=last_state)

    def validate_resource(self, name):
        # internal sanity check: the argument spec restricts name to known resources
        if name not in self.resource_configuration:
            raise KeyError('%s - configuration entry missing for resource' % name)

    def validate_attributes(self, name):
        # fail with one message listing every missing required attribute
        required = self.resource_configuration[name].get('required_attributes', list())
        msgs = list()
        for attribute in required:
            if attribute not in self.parameters['attributes']:
                msgs.append('attributes: %s is required for resource name: %s' % (attribute, name))
        if msgs:
            self.module.fail_json(msg='Error: %s' % ', '.join(msgs))

    def validate_conditions(self, name):
        # fail with one message listing every unknown condition, plus the valid ones
        conditions = self.resource_configuration[name].get('conditions')
        msgs = list()
        for condition in self.parameters['conditions']:
            if condition not in conditions:
                msgs.append('condition: %s is not valid for resource name: %s' % (condition, name))
        if msgs:
            msgs.append('valid condition%s: %s' %
                        ('s are' if len(conditions) > 1 else ' is', ', '.join(conditions.keys())))
            self.module.fail_json(msg='Error: %s' % ', '.join(msgs))

    def apply(self):
        ''' calls the ZAPI and check conditions '''
        changed = False
        self.asup_log_for_cserver("na_ontap_wait_for_condition: %s " % self.parameters['name'])
        name = self.parameters['name']
        self.validate_resource(name)
        self.validate_attributes(name)
        self.validate_conditions(name)
        output = self.wait_for_condition(name)
        states, last_state = self.summarize_states()
        self.module.exit_json(changed=changed, msg=output, states=states, last_state=last_state)
+
+
def main():
    """Module entry point: build the waiter object and run it."""
    waiter = NetAppONTAPWFC()
    waiter.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py
new file mode 100644
index 00000000..78b097dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_wwpn_alias
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_wwpn_alias
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP set FCP WWPN Alias
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Create/Delete FCP WWPN Alias
+
+options:
+ state:
+ description:
+ - Whether the specified alias should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - The name of the alias to create or delete.
+ required: true
+ type: str
+
+ wwpn:
+ description:
+ - WWPN of the alias.
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create FCP Alias
+ na_ontap_wwpn_alias:
+ state: present
+ name: alias1
+ wwpn: 01:02:03:04:0a:0b:0c:0d
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete FCP Alias
+ na_ontap_wwpn_alias:
+ state: absent
+ name: alias1
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
class NetAppOntapWwpnAlias(object):
    ''' ONTAP WWPN alias operations (REST only, requires ONTAP 9.6 or later) '''
    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=[
                'present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            wwpn=dict(required=False, type='str'),
            vserver=dict(required=True, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[('state', 'present', ['wwpn'])],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # REST API should be used for ONTAP 9.6 or higher.
        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_wwpn_alias'))

    def get_alias(self, uuid):
        """
        Fetch the alias on the given SVM.
        :return: dict with 'name' and 'wwpn', or None if the alias does not exist.
        """
        params = {'fields': 'alias,wwpn',
                  'alias': self.parameters['name'],
                  'svm.uuid': uuid}
        api = 'network/fc/wwpn-aliases'
        message, error = self.rest_api.get(api, params)
        if error is not None:
            self.module.fail_json(msg="Error on fetching wwpn alias: %s" % error)
        # use .get() so a response without num_records is treated as "not found"
        # rather than raising a KeyError
        if message.get('num_records', 0) > 0:
            return {'name': message['records'][0]['alias'],
                    'wwpn': message['records'][0]['wwpn'],
                    }
        return None

    def create_alias(self, uuid, is_modify=False):
        """Create the alias; report the error in a modify context when re-creating."""
        params = {'alias': self.parameters['name'],
                  'wwpn': self.parameters['wwpn'],
                  'svm.uuid': uuid}
        api = 'network/fc/wwpn-aliases'
        dummy, error = self.rest_api.post(api, params)
        if error is not None:
            if is_modify:
                self.module.fail_json(msg="Error on modifying wwpn alias when trying to re-create alias: %s." % error)
            else:
                self.module.fail_json(msg="Error on creating wwpn alias: %s." % error)

    def delete_alias(self, uuid, is_modify=False):
        """Delete the alias; report the error in a modify context when re-creating."""
        api = 'network/fc/wwpn-aliases/%s/%s' % (uuid, self.parameters['name'])
        dummy, error = self.rest_api.delete(api)
        if error is not None:
            if is_modify:
                self.module.fail_json(msg="Error on modifying wwpn alias when trying to delete alias: %s." % error)
            else:
                self.module.fail_json(msg="Error on deleting wwpn alias: %s." % error)

    def get_svm_uuid(self):
        """
        Get a svm's UUID.
        Fails the module with an explicit message when the vserver does not
        exist, instead of raising an IndexError on the empty record list.
        :return: uuid of the svm.
        """
        params = {'fields': 'uuid', 'name': self.parameters['vserver']}
        api = "svm/svms"
        message, error = self.rest_api.get(api, params)
        if error is not None:
            self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
        records = message.get('records', [])
        if not records:
            self.module.fail_json(msg="Error: vserver %s not found." % self.parameters['vserver'])
        return records[0]['uuid']

    def apply(self):
        """Apply the desired state: create, delete, or re-create on wwpn change."""
        uuid = self.get_svm_uuid()
        current = self.get_alias(uuid)
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_alias(uuid)
            elif cd_action == 'delete':
                self.delete_alias(uuid)
            elif modify:
                # REST has no alias modify: re-create the alias with the new wwpn
                self.delete_alias(uuid, is_modify=True)
                self.create_alias(uuid, is_modify=True)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Module entry point: build the alias object and apply the state."""
    wwpn_alias = NetAppOntapWwpnAlias()
    wwpn_alias.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py
new file mode 100644
index 00000000..29b9de70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Call a ZAPI on ONTAP.
+ - Cluster ZAPIs are run using a cluster admin account.
+ - Vserver ZAPIs can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver option)).
+ - In case of success, a json dictionary is returned as C(response).
+ - In case of a ZAPI error, C(status), C(errno), C(reason) are set to help with diagnosing the issue,
+ - and the call is reported as an error ('failed').
+ - Other errors (eg connection issues) are reported as Ansible error.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_zapit
+short_description: NetApp ONTAP Run any ZAPI on ONTAP
+version_added: "20.4.0"
+options:
+ zapi:
+ description:
+ - A dictionary for the zapi and arguments.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - A single zapi can be called at a time. Ansible warns if duplicate keys are found and only uses the last entry.
+ required: true
+ type: dict
+ vserver:
+ description:
+ - if provided, forces vserver tunneling. username identifies a cluster admin account.
+ type: str
+'''
+
+EXAMPLES = """
+-
+ name: Ontap ZAPI
+ hosts: localhost
+ gather_facts: False
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ admin_ip }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ https: true
+ validate_certs: false
+ svm_login: &svm_login
+ hostname: "{{ svm_admin_ip }}"
+ username: "{{ svm_admin_username }}"
+ password: "{{ svm_admin_password }}"
+ https: true
+ validate_certs: false
+
+ tasks:
+ - name: run ontap ZAPI command as cluster admin
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ system-get-version:
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as cluster admin
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ vserver-get-iter:
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as cluster admin
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - aggr-list:
+ - aggr-name
+ - allowed-protocols:
+ - protocols
+ - vserver-aggr-info-list:
+              - vserver-aggr-info
+ - uuid
+ query:
+ vserver-info:
+ vserver-name: trident_svm
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as vsadmin
+ na_ontap_zapit:
+ <<: *svm_login
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - uuid
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as vserver tunneling
+ na_ontap_zapit:
+ <<: *login
+ vserver: trident_svm
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - uuid
+ register: output
+ - debug: var=output
+
+ - name: run ontap active-directory ZAPI command
+ na_ontap_zapit:
+ <<: *login
+ vserver: trident_svm
+ zapi:
+ active-directory-account-create:
+ account-name: testaccount
+ admin-username: testuser
+ admin-password: testpass
+ domain: testdomain
+ organizational-unit: testou
+ register: output
+ ignore_errors: True
+ - debug: var=output
+
+"""
+
+RETURN = """
+response:
+ description:
+ - If successful, a json dictionary representing the data returned by the ZAPI.
+ - If the ZAPI was executed but failed, an empty dictionary.
+ - Not present if the ZAPI call cannot be performed.
+ returned: On success
+ type: dict
+status:
+ description:
+ - If the ZAPI was executed but failed, the status set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+errno:
+ description:
+ - If the ZAPI was executed but failed, the error code set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+reason:
+ description:
+ - If the ZAPI was executed but failed, the error reason set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPZapi(object):
    ''' Run an arbitrary ZAPI request and return its result as JSON. '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            zapi=dict(required=True, type='dict'),
            vserver=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=False
        )
        parameters = self.module.params
        # set up state variables
        self.zapi = parameters['zapi']
        self.vserver = parameters['vserver']

        if not HAS_JSON:
            self.module.fail_json(msg="the python json module is required")
        if not HAS_XMLTODICT:
            self.module.fail_json(msg="the python xmltodict module is required")
        if not HAS_NETAPP_LIB:
            self.module.fail_json(msg="the python NetApp-Lib module is required")

        # a vserver forces vserver tunneling; the credentials then identify a cluster admin
        if self.vserver is not None:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create an Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        try:
            netapp_utils.ems_log_event(event_name, cserver)
        except netapp_utils.zapi.NaApiError:
            # best effort - never fail the module because EMS logging failed
            pass

    def jsonify_and_parse_output(self, xml_data):
        ''' convert from XML to JSON
            extract status and error fields if present
            :return: (response dict, status, errno, reason)
        '''
        try:
            as_dict = xmltodict.parse(xml_data.to_string(), xml_attribs=True)
        except Exception as exc:
            self.module.fail_json(msg='Error running zapi in xmltodict: %s: %s' %
                                  (xml_data.to_string(), str(exc)))
        # round-trip through json to normalize OrderedDicts into plain structures
        try:
            as_json = json.loads(json.dumps(as_dict))
        except Exception as exc:
            self.module.fail_json(msg='Error running zapi in json load/dump: %s: %s' %
                                  (as_dict, str(exc)))

        if 'results' not in as_json:
            self.module.fail_json(msg='Error running zapi, no results field: %s: %s' %
                                  (xml_data.to_string(), repr(as_json)))

        # set status, and if applicable errno/reason, and remove attribute fields
        errno = None
        reason = None
        response = as_json.pop('results')
        status = response.get('@status', 'no_status_attr')
        if status != 'passed':
            # collect errno and reason, falling back on element names used by
            # some ZAPIs, then on fixed defaults
            errno = response.get('@errno', None)
            if errno is None:
                errno = response.get('errorno', None)
            if errno is None:
                errno = 'ESTATUSFAILED'
            reason = response.get('@reason', None)
            if reason is None:
                reason = response.get('reason', None)
            if reason is None:
                reason = 'Execution failure with unknown reason.'

        for key in ('@status', '@errno', '@reason', '@xmlns'):
            try:
                # remove irrelevant info
                del response[key]
            except KeyError:
                pass
        return response, status, errno, reason

    def run_zapi(self):
        ''' calls the ZAPI '''
        zapi_struct = self.zapi
        error = None
        if not isinstance(zapi_struct, dict):
            # fix wrong error message: the input must be a dictionary, not a directory
            error = 'A dictionary entry is expected, eg: system-get-version: '
            zapi = zapi_struct
        else:
            zapi = list(zapi_struct.keys())
            if len(zapi) != 1:
                error = 'A single ZAPI can be called at a time'
            else:
                zapi = zapi[0]

        # log first, then error out as needed
        self.ems(zapi)
        if error:
            self.module.fail_json(msg='%s, received: %s' % (error, zapi))

        zapi_obj = netapp_utils.zapi.NaElement(zapi)
        attributes = zapi_struct[zapi]
        # 'None' (as a string) can show up when YAML leaves the value empty
        if attributes is not None and attributes != 'None':
            zapi_obj.translate_struct(attributes)

        try:
            output = self.server.invoke_elem(zapi_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error running zapi %s: %s' %
                                      (zapi, to_native(error)),
                                  exception=traceback.format_exc())

        return self.jsonify_and_parse_output(output)

    def ems(self, zapi):
        """
        Best-effort EMS logging of the ZAPI call.
        With vserver tunneling the event is logged on the tunneled connection;
        otherwise it is logged on the cluster admin vserver.
        """
        if self.vserver:
            try:
                netapp_utils.ems_log_event("na_ontap_zapi" + str(zapi), self.server)
            except netapp_utils.zapi.NaApiError:
                # logging is best effort - never fail the ZAPI call because of EMS
                pass
        else:
            self.asup_log_for_cserver("na_ontap_zapi: " + str(zapi))

    def apply(self):
        ''' calls the zapi and returns json output '''
        response, status, errno, reason = self.run_zapi()
        if status == 'passed':
            self.module.exit_json(changed=True, response=response)
        msg = 'ZAPI failure: check errno and reason.'
        self.module.fail_json(changed=False, response=response, status=status, errno=errno, reason=reason, msg=msg)
+
+
def main():
    """Module entry point: build the ZAPI runner and execute it."""
    runner = NetAppONTAPZapi()
    runner.apply()


if __name__ == '__main__':
    main()