author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit    a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree      f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/netapp
parent    Initial commit. (diff)
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/netapp')
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/FILES.json194
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/MANIFEST.json33
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/README.md56
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py47
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp.py159
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py142
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py274
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py362
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py267
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py245
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/builtins.py33
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/mock.py122
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/unittest.py38
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py111
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py148
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py251
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py140
-rw-r--r--collections-debian-merged/ansible_collections/netapp/aws/tests/unit/requirements.txt1
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/CHANGELOG.rst147
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/FILES.json551
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/MANIFEST.json32
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/README.md102
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/.plugin-cache.yaml135
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/changelog.yaml179
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/config.yaml32
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml21
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml7
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py51
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py98
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py206
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py225
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py397
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py247
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py340
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py233
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py243
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py154
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py372
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py331
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py206
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py365
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py361
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py269
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py343
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py254
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py423
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py357
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py241
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py369
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py203
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py576
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py274
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py413
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py276
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py293
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py33
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py122
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py38
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py175
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py245
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py137
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py157
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py176
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py328
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py201
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py293
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py324
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py300
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py138
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py343
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py364
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py149
-rw-r--r--collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/requirements.txt2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/CHANGELOG.rst608
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/FILES.json2546
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/MANIFEST.json32
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/README.md532
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml520
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/changelog.yaml879
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/config.yaml32
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml40
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml16
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml20
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml17
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml8
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml30
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml10
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml53
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml37
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml9
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml24
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml33
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml17
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml6
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml6
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml4
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/README.md37
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md30
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml76
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml209
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml202
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml46
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml47
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml45
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml27
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py106
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py745
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py159
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py392
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py160
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py93
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py133
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py233
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py824
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py289
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py196
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py443
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py220
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py227
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py324
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py243
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py387
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py525
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py135
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py332
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py319
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py235
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py363
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py316
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py310
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py458
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py217
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py360
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py366
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py737
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py470
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py357
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py198
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py1787
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py613
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py311
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py273
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py339
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py394
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py357
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py419
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py333
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py276
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py757
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py188
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py287
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py185
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py170
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py222
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py210
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py216
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py407
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py315
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py236
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py434
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py332
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py190
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py599
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py147
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py360
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py292
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py211
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py221
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py363
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py284
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py385
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py287
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py335
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py317
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py457
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py255
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py450
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py144
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py617
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py305
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py455
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py233
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py292
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py895
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py837
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py333
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py500
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py154
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py133
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py417
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py255
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py540
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py162
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py245
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py353
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py259
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py712
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py274
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py2100
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py364
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py280
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py226
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py182
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py424
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py326
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py312
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py301
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py299
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py347
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py193
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py344
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE661
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md127
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml25
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml203
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE674
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md65
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml63
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE674
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md67
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml65
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md70
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml13
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml55
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE674
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md105
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml14
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml187
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml5
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt6
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt1
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py33
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/mock.py122
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py38
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp.py468
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py400
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py419
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py245
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py135
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py309
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py116
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py222
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py429
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py212
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py205
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py321
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py232
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py289
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py269
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py173
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py296
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py436
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py531
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py260
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py218
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py557
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py312
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py269
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py256
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py369
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py269
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py185
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py287
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py177
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py155
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py192
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py277
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py156
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py149
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py196
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py182
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py180
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py227
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py299
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py180
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py461
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py265
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py309
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py268
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py225
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py217
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py201
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py242
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py300
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py173
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py190
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py347
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py340
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py464
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py207
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py243
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py137
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py543
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py435
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py174
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py234
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py630
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py717
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py227
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py691
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py153
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py190
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py430
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py121
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py176
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py289
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_user.py283
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py505
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py239
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py1183
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py243
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py257
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py333
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py166
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py234
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py159
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py168
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py188
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py166
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py250
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py224
-rw-r--r--collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/requirements.txt3
398 files changed, 90894 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/FILES.json b/collections-debian-merged/ansible_collections/netapp/aws/FILES.json
new file mode 100644
index 00000000..74c78d40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/FILES.json
@@ -0,0 +1,194 @@
+{
+ "files": [
+ {
+ "format": 1,
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": ".",
+ "chksum_type": null
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/doc_fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a629828b5936404e9822e066dc372b9f36d24f0a4ace73c9b2dbf99c8d2bb56d",
+ "name": "plugins/doc_fragments/netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/module_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9109380e733e4915f5bd347431c61b0f64489a177fbbaca57baa4b2e018adfbd",
+ "name": "plugins/module_utils/netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2d69e77a6e5b76dc8909149c8c364454e80fb42631af7d889dfb6e2ff0438c3e",
+ "name": "plugins/module_utils/netapp_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6fd061a1e54ce32dfbdeecd58f40f78e0836ec2c6eb2d10d60bce10d581ef40a",
+ "name": "plugins/modules/aws_netapp_cvs_filesystems.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "495a911b97e0593f7c7f9b8d225296394bf0e712dee0e0d922d9d0952fd660df",
+ "name": "plugins/modules/aws_netapp_cvs_active_directory.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8edfd787384f01ef37a0032e60898b0253472355a32e420b439e1dbb4d385e85",
+ "name": "plugins/modules/aws_netapp_cvs_snapshots.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7a4b6fc9d48d61cf80a052455334ffd671dd880e7ec476aff6ccae820b658610",
+ "name": "plugins/modules/aws_netapp_cvs_pool.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/compat",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096",
+ "name": "tests/unit/compat/unittest.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
+ "name": "tests/unit/compat/builtins.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "tests/unit/compat/__init__.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "name": "tests/unit/compat/mock.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "bae23898d2424f99afdc207e2598c8cb827f78f86cd1880c5e92947a2750d988",
+ "name": "tests/unit/requirements.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1832d488ba9e6378f74e39d04606b3f0cf9d5fc35e7777caf7eec9ab12a6cc77",
+ "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "90605d26722db28c00b7f430723043a94adfc8490bc7e5055f8fae530886c51a",
+ "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5f5ad7dfe4fcad1f781209e5f3524eeabc47f75f6a15b3660f7d918de670b026",
+ "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4d368fda4b642933c3b14ecbbec6b20c5cf397fc1e1adb0c9797286da9b6db56",
+ "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "73bbfeb60d4f06f66dc2ef058eded510eb8eab2eb1f1bbc6ba2324742e118911",
+ "name": "README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/MANIFEST.json b/collections-debian-merged/ansible_collections/netapp/aws/MANIFEST.json
new file mode 100644
index 00000000..eb531c42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/MANIFEST.json
@@ -0,0 +1,33 @@
+{
+ "collection_info": {
+ "description": "Cloud Volumes Service (CVS) for AWS",
+ "repository": "https://github.com/ansible-collections/netapp",
+ "tags": [
+ "storage",
+ "cloud"
+ ],
+ "dependencies": {},
+ "authors": [
+ "NetApp Ansible Team <ng-ansibleteam@netapp.com>"
+ ],
+ "issues": null,
+ "name": "aws",
+ "license": [
+ "GPL-2.0-or-later"
+ ],
+ "documentation": null,
+ "namespace": "netapp",
+ "version": "20.9.0",
+ "readme": "README.md",
+ "license_file": null,
+ "homepage": "https://netapp.io/configuration-management-and-automation/"
+ },
+ "file_manifest_file": {
+ "format": 1,
+ "ftype": "file",
+ "chksum_sha256": "6d32959d548c4eeb9fc2f62a94868ce46604ea71acafdd0bd50ed765012df889",
+ "name": "FILES.json",
+ "chksum_type": "sha256"
+ },
+ "format": 1
+}
\ No newline at end of file
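
MANIFEST.json above records the collection as netapp.aws at version 20.9.0. For reproducible installs, that version can be pinned in a requirements file; the sketch below only restates metadata already shown in the manifest and is consumed with `ansible-galaxy collection install -r requirements.yml`.

```yaml
# requirements.yml -- pins the collection version recorded in MANIFEST.json above
collections:
  - name: netapp.aws
    version: 20.9.0
```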
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/README.md b/collections-debian-merged/ansible_collections/netapp/aws/README.md
new file mode 100644
index 00000000..3faa0ee9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/README.md
@@ -0,0 +1,56 @@
+=============================================================
+
+netapp.aws
+
+NetApp AWS CVS Collection
+
+Copyright (c) 2019 NetApp, Inc. All rights reserved.
+Specifications subject to change without notice.
+
+=============================================================
+
+# Installation
+```bash
+ansible-galaxy collection install netapp.aws
+```
+To use this collection, add the following to the top of your playbook; without it you will be using the Ansible 2.9 version of the module.
+```
+collections:
+ - netapp.aws
+```
+# Need help
+Join our Slack Channel at [Netapp.io](http://netapp.io/slack)
+
+# Notes
+
+These Ansible modules support NetApp Cloud Volumes Service for AWS.
+
+They require a subscription to the service and your API access keys.
+
+The modules currently support Active Directory, Pool, FileSystem (Volume), and Snapshot services.
+
+# Release Notes
+
+
+## 20.9.0
+
+Fix pylint or flake8 warnings reported by galaxy importer.
+
+## 20.8.0
+
+### Module documentation changes
+- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+- add `elements:` and update `required:` to match module requirements.
+
+## 20.6.0
+
+### Bug Fixes
+- galaxy.xml: fix repository and homepage links.
+
+## 20.2.0
+
+### Bug Fixes
+- galaxy.yml: fix path to github repository.
+
+## 19.11.0
+- Initial release as a collection.
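
The README above covers installation and notes that the modules manage Active Directory, Pool, FileSystem (Volume), and Snapshot services, but gives no task example. The sketch below is illustrative only: the connection options (api_url, api_key, secret_key) come from the doc fragment added in the next file, while `state` and the pool parameters (name, region, sizeInBytes, serviceLevel) are assumptions to be checked against the aws_netapp_cvs_pool module documentation.

```yaml
---
# Illustrative playbook sketch, not part of the packaged collection.
# api_url/api_key/secret_key match the shared doc fragment; the pool
# parameters below are assumed, not verified against the module docs.
- hosts: localhost
  gather_facts: false
  collections:
    - netapp.aws
  tasks:
    - name: Ensure a CVS storage pool exists
      aws_netapp_cvs_pool:
        state: present
        name: ansible_pool             # assumed parameter
        region: us-east-1              # assumed parameter
        sizeInBytes: 4000000000000     # assumed parameter
        serviceLevel: standard         # assumed parameter
        api_url: "{{ cvs_api_url }}"
        api_key: "{{ cvs_api_key }}"
        secret_key: "{{ cvs_secret_key }}"
```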
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py
new file mode 100644
index 00000000..a2e7335a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - This is documentation for NetApp's AWS CVS modules.
+'''
+
+ # Documentation fragment for AWSCVS
+ AWSCVS = """
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The access key to authenticate with the AWSCVS Web Services Proxy or Embedded Web Services API.
+ secret_key:
+ required: true
+ type: str
+ description:
+ - The secret_key to authenticate with the AWSCVS Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the AWSCVS Web Services Proxy or Embedded Web Services API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+notes:
+ - The modules prefixed with aws\\_netapp\\_cvs are built to manage AWS Cloud Volumes Service.
+"""
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp.py
new file mode 100644
index 00000000..38e0e351
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp.py
@@ -0,0 +1,159 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2019, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+COLLECTION_VERSION = "20.9.0"
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+POW2_BYTE_MAP = dict(
+ # Here, 1 kb = 1024
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+)
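+# Example (illustrative): 100 * POW2_BYTE_MAP['gb'] == 107374182400 bytes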
+
+
+def aws_cvs_host_argument_spec():
+
+ return dict(
+ api_url=dict(required=True, type='str'),
+ validate_certs=dict(required=False, type='bool', default=True),
+ api_key=dict(required=True, type='str'),
+ secret_key=dict(required=True, type='str')
+ )
+
+
+class AwsCvsRestAPI(object):
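+    """ Thin wrapper around the AWS Cloud Volumes Service REST API.
+
+    The get/post/put/delete helpers return the (json, error) tuple produced
+    by send_request; callers are expected to check the error element before
+    using the payload.
+    """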
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.api_key = self.module.params['api_key']
+ self.secret_key = self.module.params['secret_key']
+ self.api_url = self.module.params['api_url']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.api_url + '/v1/'
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None):
+        ''' send an http request and process the response, including error conditions; returns (json, error) '''
+ if params is not None:
+ self.module.fail_json(msg='params is not implemented. api=%s, params=%s' % (api, repr(params)))
+ url = self.url + api
+ json_dict = None
+ json_error = None
+ error_details = None
+ headers = {
+ 'Content-type': "application/json",
+ 'api-key': self.api_key,
+ 'secret-key': self.secret_key,
+ 'Cache-Control': "no-cache",
+ }
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+
+ except ValueError:
+ return None, None
+ success_code = [200, 201, 202]
+ if response.status_code not in success_code:
+ error = json.get('message')
+ else:
+ error = None
+ return json, error
+ try:
+ response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
+ # If the response was successful, no Exception will be raised
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ error_details = str(err)
+ except requests.exceptions.ConnectionError as err:
+ error_details = str(err)
+ except Exception as err:
+ error_details = str(err)
+ if json_error is not None:
+ error_details = json_error
+
+ return json_dict, error_details
+
+    # Errors reported in the json payload are returned by send_request as error_details
+ def get(self, api, params=None):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def put(self, api, data, params=None):
+ method = 'PUT'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def get_state(self, job_id):
+        """ Poll the job until its state is 'done' and return 'done' """
+        response, dummy = self.get('Jobs/%s' % job_id)
+        while str(response['state']) != 'done':
+ response, dummy = self.get('Jobs/%s' % job_id)
+ return 'done'
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py
new file mode 100644
index 00000000..3e31ae98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py
@@ -0,0 +1,142 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def cmp(a, b):
+ """
+ Python 3 does not have a cmp function, this will do the cmp.
+ :param a: first object to check
+ :param b: second object to check
+ :return:
+ """
+ # convert to lower case for string comparison.
+ if a is None:
+ return -1
+ if isinstance(a, str) and isinstance(b, str):
+ a = a.lower()
+ b = b.lower()
+ # if list has string element, convert string to lower case.
+ if isinstance(a, list) and isinstance(b, list):
+ a = [x.lower() if isinstance(x, str) else x for x in a]
+ b = [x.lower() if isinstance(x, str) else x for x in b]
+ a.sort()
+ b.sort()
+ return (a > b) - (a < b)
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
+ def __init__(self):
+ self.log = list()
+ self.changed = False
+        self.parameters = {'name': 'not initialized'}
+
+ def set_parameters(self, ansible_params):
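+        ''' copy the Ansible module parameters, skipping any whose value is None '''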
+ self.parameters = dict()
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def get_cd_action(self, current, desired):
+        ''' takes a current state and a desired state, and returns an action:
+            create, delete, None
+            eg:
+            some_object = self.get_object(source)
+            action = self.get_cd_action(current=some_object, desired=self.parameters)
+        '''
+ if 'state' in desired:
+ desired_state = desired['state']
+ else:
+ desired_state = 'present'
+
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ if current is not None:
+ return 'delete'
+ return 'create'
+
+ def compare_and_update_values(self, current, desired, keys_to_compare):
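+        ''' for each key in keys_to_compare, take the desired value when it is set
+            and differs from the current one, otherwise keep the current value;
+            returns (updated_values, is_changed) '''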
+ updated_values = dict()
+ is_changed = False
+ for key in keys_to_compare:
+ if key in current:
+ if key in desired and desired[key] is not None:
+ if current[key] != desired[key]:
+ updated_values[key] = desired[key]
+ is_changed = True
+ else:
+ updated_values[key] = current[key]
+ else:
+ updated_values[key] = current[key]
+
+ return updated_values, is_changed
+
+ def is_rename_action(self, source, target):
+ ''' takes a source and target object, and returns True
+ if a rename is required
+ eg:
+ source = self.get_object(source_name)
+ target = self.get_object(target_name)
+ action = is_rename_action(source, target)
+ :return: None for error, True for rename action, False otherwise
+ '''
+ if source is None and target is None:
+ # error, do nothing
+            # cannot rename a non-existent resource
+ # alternatively we could create B
+ return None
+ if source is not None and target is not None:
+ # error, do nothing
+ # idempotency (or) new_name_is_already_in_use
+ # alternatively we could delete B and rename A to B
+ return False
+ if source is None and target is not None:
+ # do nothing, maybe the rename was already done
+ return False
+ # source is not None and target is None:
+ # rename is in order
+ self.changed = True
+ return True
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py
new file mode 100644
index 00000000..68b25c7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""AWS Cloud Volumes Services - Manage ActiveDirectory"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: aws_netapp_cvs_active_directory
+
+short_description: NetApp AWS Cloud Volumes Service Manage Active Directory.
+extends_documentation_fragment:
+ - netapp.aws.netapp.awscvs
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, Update, Delete ActiveDirectory on AWS Cloud Volumes Service.
+
+options:
+ state:
+ description:
+ - Whether the specified ActiveDirectory should exist or not.
+ choices: ['present', 'absent']
+ required: true
+ type: str
+
+ region:
+ description:
+ - The region to which the Active Directory credentials are associated.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Name of the Active Directory domain
+ type: str
+
+ DNS:
+ description:
+ - DNS server address for the Active Directory domain
+    - Required when C(state=present), to create or modify ActiveDirectory properties.
+ type: str
+
+ netBIOS:
+ description:
+ - NetBIOS name of the server.
+ type: str
+
+ username:
+ description:
+ - Username of the Active Directory domain administrator
+ type: str
+
+ password:
+ description:
+ - Password of the Active Directory domain administrator
+ - Required when C(state=present), to modify ActiveDirectory properties
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create Active Directory
+    aws_netapp_cvs_active_directory:
+ state: present
+ region: us-east-1
+ DNS: 101.102.103.123
+ domain: mydomain.com
+ password: netapp1!
+ netBIOS: testing
+ username: user1
+ api_url : My_CVS_Hostname
+ api_key: My_API_Key
+ secret_key : My_Secret_Key
+
+ - name: Update Active Directory
+    aws_netapp_cvs_active_directory:
+ state: present
+ region: us-east-1
+ DNS: 101.102.103.123
+ domain: mydomain.com
+ password: netapp2!
+ netBIOS: testingBIOS
+ username: user2
+ api_url : My_CVS_Hostname
+ api_key: My_API_Key
+ secret_key : My_Secret_Key
+
+ - name: Delete Active Directory
+    aws_netapp_cvs_active_directory:
+ state: absent
+ region: us-east-1
+ domain: mydomain.com
+ api_url : My_CVS_Hostname
+ api_key: My_API_Key
+ secret_key : My_Secret_Key
+"""
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI
+
+
+class AwsCvsNetappActiveDir(object):
+ """
+ Contains methods to parse arguments,
+ derive details of AWS_CVS objects
+ and send requests to AWS CVS via
+ the restApi
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure the requests module is installed
+ """
+ self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ region=dict(required=True, type='str'),
+ DNS=dict(required=False, type='str'),
+ domain=dict(required=False, type='str'),
+ password=dict(required=False, type='str', no_log=True),
+ netBIOS=dict(required=False, type='str'),
+ username=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['domain', 'password']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Calling generic AWSCVS restApi class
+ self.rest_api = AwsCvsRestAPI(self.module)
+
+ def get_activedirectory_id(self):
+ # Check if ActiveDirectory exists
+        # Return the UUID if an ActiveDirectory is found, None otherwise
+ try:
+ list_activedirectory, dummy = self.rest_api.get('Storage/ActiveDirectory')
+ except Exception:
+ return None
+
+ for activedirectory in list_activedirectory:
+ if activedirectory['region'] == self.parameters['region']:
+ return activedirectory['UUID']
+ return None
+
+ def get_activedirectory(self, activedirectory_id=None):
+ if activedirectory_id is None:
+ return None
+ else:
+ activedirectory_info, error = self.rest_api.get('Storage/ActiveDirectory/%s' % activedirectory_id)
+ if not error:
+ return activedirectory_info
+ return None
+
+ def create_activedirectory(self):
+ # Create ActiveDirectory
+ api = 'Storage/ActiveDirectory'
+ data = {"region": self.parameters['region'], "DNS": self.parameters['DNS'], "domain": self.parameters['domain'],
+ "username": self.parameters['username'], "password": self.parameters['password'], "netBIOS": self.parameters['netBIOS']}
+
+ response, error = self.rest_api.post(api, data)
+
+ if not error:
+ return response
+ else:
+ self.module.fail_json(msg=response['message'])
+
+ def delete_activedirectory(self):
+ activedirectory_id = self.get_activedirectory_id()
+ # Delete ActiveDirectory
+
+ if activedirectory_id:
+ api = 'Storage/ActiveDirectory/' + activedirectory_id
+ data = None
+ response, error = self.rest_api.delete(api, data)
+ if not error:
+ return response
+ else:
+ self.module.fail_json(msg=response['message'])
+
+ else:
+ self.module.fail_json(msg="Active Directory does not exist")
+
+ def update_activedirectory(self, activedirectory_id, updated_activedirectory):
+ # Update ActiveDirectory
+ api = 'Storage/ActiveDirectory/' + activedirectory_id
+ data = {
+ "region": self.parameters['region'],
+ "DNS": updated_activedirectory['DNS'],
+ "domain": updated_activedirectory['domain'],
+ "username": updated_activedirectory['username'],
+ "password": updated_activedirectory['password'],
+ "netBIOS": updated_activedirectory['netBIOS']
+ }
+
+ response, error = self.rest_api.put(api, data)
+ if not error:
+ return response
+ else:
+ self.module.fail_json(msg=response['message'])
+
+ def apply(self):
+ """
+ Perform pre-checks, call functions and exit
+ """
+ modify = False
+ activedirectory_id = self.get_activedirectory_id()
+ current = self.get_activedirectory(activedirectory_id)
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if current and self.parameters['state'] != 'absent':
+ keys_to_check = ['DNS', 'domain', 'username', 'netBIOS']
+ updated_active_directory, modify = self.na_helper.compare_and_update_values(current, self.parameters, keys_to_check)
+
+ if self.parameters['password']:
+ modify = True
+ updated_active_directory['password'] = self.parameters['password']
+
+ if modify is True:
+ self.na_helper.changed = True
+ if 'domain' in self.parameters and self.parameters['domain'] is not None:
+ ad_exists = self.get_activedirectory(updated_active_directory['domain'])
+ if ad_exists:
+ modify = False
+ self.na_helper.changed = False
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if modify is True:
+ self.update_activedirectory(activedirectory_id, updated_active_directory)
+ elif cd_action == 'create':
+ self.create_activedirectory()
+ elif cd_action == 'delete':
+ self.delete_activedirectory()
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Main function
+ """
+ aws_netapp_cvs_active_directory = AwsCvsNetappActiveDir()
+ aws_netapp_cvs_active_directory.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py
new file mode 100644
index 00000000..037a1d89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""AWS Cloud Volumes Services - Manage fileSystem"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: aws_netapp_cvs_filesystems
+
+short_description: NetApp AWS Cloud Volumes Service Manage FileSystem.
+extends_documentation_fragment:
+ - netapp.aws.netapp.awscvs
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, Update, Delete fileSystem on AWS Cloud Volumes Service.
+
+options:
+ state:
+ description:
+ - Whether the specified fileSystem should exist or not.
+ required: true
+ choices: ['present', 'absent']
+ type: str
+
+ region:
+ description:
+    - The region to which the filesystem belongs.
+ required: true
+ type: str
+
+ creationToken:
+ description:
+ - Name of the filesystem
+ required: true
+ type: str
+
+ quotaInBytes:
+ description:
+ - Size of the filesystem
+ - Required for create
+ type: int
+
+ serviceLevel:
+ description:
+ - Service Level of a filesystem.
+ choices: ['standard', 'premium', 'extreme']
+ type: str
+
+ exportPolicy:
+ description:
+ - The policy rules to export the filesystem
+ type: dict
+ suboptions:
+ rules:
+ description:
+ - Set of rules to export the filesystem
+ - Requires allowedClients, access and protocol
+ type: list
+ elements: dict
+ suboptions:
+ allowedClients:
+ description:
+ - Comma separated list of ip address blocks of the clients to access the fileSystem
+ - Each address block contains the starting IP address and size for the block
+ type: str
+
+ cifs:
+ description:
+ - Enable or disable cifs filesystem
+ type: bool
+
+ nfsv3:
+ description:
+ - Enable or disable nfsv3 fileSystem
+ type: bool
+
+ nfsv4:
+ description:
+ - Enable or disable nfsv4 filesystem
+ type: bool
+
+ ruleIndex:
+ description:
+ - Index number of the rule
+ type: int
+
+ unixReadOnly:
+ description:
+ - Should fileSystem have read only permission or not
+ type: bool
+
+ unixReadWrite:
+ description:
+ - Should fileSystem have read write permission or not
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Create FileSystem
+ aws_netapp_cvs_filesystems:
+ state: present
+ region: us-east-1
+ creationToken: newVolume-1
+ exportPolicy:
+ rules:
+ - allowedClients: 172.16.0.4
+ cifs: False
+ nfsv3: True
+ nfsv4: True
+ ruleIndex: 1
+ unixReadOnly: True
+ unixReadWrite: False
+ quotaInBytes: 100000000000
+ api_url : cds-aws-bundles.netapp.com
+ api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
+ secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
+
+- name: Update FileSystem
+ aws_netapp_cvs_filesystems:
+ state: present
+ region: us-east-1
+ creationToken: newVolume-1
+ exportPolicy:
+ rules:
+ - allowedClients: 172.16.0.4
+ cifs: False
+ nfsv3: True
+ nfsv4: True
+ ruleIndex: 1
+ unixReadOnly: True
+ unixReadWrite: False
+ quotaInBytes: 200000000000
+ api_url : cds-aws-bundles.netapp.com
+ api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
+ secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
+
+- name: Delete FileSystem
+ aws_netapp_cvs_filesystems:
+ state: present
+ region: us-east-1
+ creationToken: newVolume-1
+ quotaInBytes: 100000000000
+ api_url : cds-aws-bundles.netapp.com
+ api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
+ secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI
+
+
+class AwsCvsNetappFileSystem(object):
+ """
+ Contains methods to parse arguments,
+ derive details of AWS_CVS objects
+ and send requests to AWS CVS via
+ the restApi
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure the requests module is installed
+ """
+ self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ region=dict(required=True, type='str'),
+ creationToken=dict(required=True, type='str'),
+ quotaInBytes=dict(required=False, type='int'),
+ serviceLevel=dict(required=False, choices=['standard', 'premium', 'extreme']),
+ exportPolicy=dict(
+ type='dict',
+ options=dict(
+ rules=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ allowedClients=dict(required=False, type='str'),
+ cifs=dict(required=False, type='bool'),
+ nfsv3=dict(required=False, type='bool'),
+ nfsv4=dict(required=False, type='bool'),
+ ruleIndex=dict(required=False, type='int'),
+ unixReadOnly=dict(required=False, type='bool'),
+ unixReadWrite=dict(required=False, type='bool')
+ )
+ )
+ )
+ ),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['region', 'creationToken', 'quotaInBytes']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # Calling generic AWSCVS restApi class
+ self.rest_api = AwsCvsRestAPI(self.module)
+
+ self.data = {}
+ for key in self.parameters.keys():
+ self.data[key] = self.parameters[key]
+
+ def get_filesystem_id(self):
+        # Check if the given FileSystem exists
+        # Return fileSystemId if found, None otherwise
+ list_filesystem, error = self.rest_api.get('FileSystems')
+ if error:
+ self.module.fail_json(msg=error)
+
+ for filesystem in list_filesystem:
+ if filesystem['creationToken'] == self.parameters['creationToken']:
+ return filesystem['fileSystemId']
+ return None
+
+ def get_filesystem(self, filesystem_id):
+ # Get FileSystem information by fileSystemId
+ # Return fileSystem Information
+ filesystem_info, error = self.rest_api.get('FileSystems/%s' % filesystem_id)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ return filesystem_info
+ return None
+
+ def is_job_done(self, response):
+        # check that a jobId is present and that the job state is 'done'
+ # return True on success, False otherwise
+ try:
+ job_id = response['jobs'][0]['jobId']
+ except TypeError:
+ job_id = None
+
+ if job_id is not None and self.rest_api.get_state(job_id) == 'done':
+ return True
+ return False
+
+ def create_filesystem(self):
+ # Create fileSystem
+ api = 'FileSystems'
+ response, error = self.rest_api.post(api, self.data)
+ if not error:
+ if self.is_job_done(response):
+ return
+ error = "Error: unexpected response on FileSystems create: %s" % str(response)
+ self.module.fail_json(msg=error)
+
+ def delete_filesystem(self, filesystem_id):
+ # Delete FileSystem
+ api = 'FileSystems/' + filesystem_id
+ self.data = None
+ response, error = self.rest_api.delete(api, self.data)
+ if not error:
+ if self.is_job_done(response):
+ return
+ error = "Error: unexpected response on FileSystems delete: %s" % str(response)
+ self.module.fail_json(msg=error)
+
+ def update_filesystem(self, filesystem_id):
+ # Update FileSystem
+ api = 'FileSystems/' + filesystem_id
+ response, error = self.rest_api.put(api, self.data)
+ if not error:
+ if self.is_job_done(response):
+ return
+ error = "Error: unexpected response on FileSystems update: %s" % str(response)
+ self.module.fail_json(msg=error)
+
+ def apply(self):
+ """
+ Perform pre-checks, call functions and exit
+ """
+
+ filesystem = None
+ filesystem_id = self.get_filesystem_id()
+
+ if filesystem_id:
+ # Getting the FileSystem details
+ filesystem = self.get_filesystem(filesystem_id)
+
+ cd_action = self.na_helper.get_cd_action(filesystem, self.parameters)
+
+ if cd_action is None and self.parameters['state'] == 'present':
+ # Check if we need to update the fileSystem
+ update_filesystem = False
+ if filesystem['quotaInBytes'] is not None and 'quotaInBytes' in self.parameters \
+ and filesystem['quotaInBytes'] != self.parameters['quotaInBytes']:
+ update_filesystem = True
+ elif filesystem['creationToken'] is not None and 'creationToken' in self.parameters \
+ and filesystem['creationToken'] != self.parameters['creationToken']:
+ update_filesystem = True
+ elif filesystem['serviceLevel'] is not None and 'serviceLevel' in self.parameters \
+ and filesystem['serviceLevel'] != self.parameters['serviceLevel']:
+ update_filesystem = True
+ elif filesystem['exportPolicy']['rules'] is not None and 'exportPolicy' in self.parameters:
+ for rule_org in filesystem['exportPolicy']['rules']:
+ for rule in self.parameters['exportPolicy']['rules']:
+ if rule_org['allowedClients'] != rule['allowedClients']:
+ update_filesystem = True
+ elif rule_org['unixReadOnly'] != rule['unixReadOnly']:
+ update_filesystem = True
+ elif rule_org['unixReadWrite'] != rule['unixReadWrite']:
+ update_filesystem = True
+
+ if update_filesystem:
+ self.na_helper.changed = True
+
+ result_message = ""
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ # Skip changes
+ result_message = "Check mode, skipping changes"
+ else:
+ if cd_action == "create":
+ self.create_filesystem()
+ result_message = "FileSystem Created"
+ elif cd_action == "delete":
+ self.delete_filesystem(filesystem_id)
+ result_message = "FileSystem Deleted"
+ else: # modify
+ self.update_filesystem(filesystem_id)
+ result_message = "FileSystem Updated"
+ self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ aws_cvs_netapp_filesystem = AwsCvsNetappFileSystem()
+ aws_cvs_netapp_filesystem.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py
new file mode 100644
index 00000000..fa4818a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""AWS Cloud Volumes Services - Manage Pools"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: aws_netapp_cvs_pool
+
+short_description: NetApp AWS Cloud Volumes Service Manage Pools.
+extends_documentation_fragment:
+ - netapp.aws.netapp.awscvs
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, Update, Delete Pool on AWS Cloud Volumes Service.
+
+options:
+ state:
+ description:
+ - Whether the specified pool should exist or not.
+ choices: ['present', 'absent']
+ required: true
+ type: str
+ region:
+ description:
+ - The region to which the Pool is associated.
+ required: true
+ type: str
+ name:
+ description:
+    - pool name (the human readable name of the Pool)
+ - name can be used for create, update and delete operations
+ required: true
+ type: str
+ serviceLevel:
+ description:
+ - The service level of the Pool
+ - can be used with pool create, update operations
+ choices: ['basic', 'standard', 'extreme']
+ type: str
+ sizeInBytes:
+ description:
+ - Size of the Pool in bytes
+ - can be used with pool create, update operations
+ - minimum value is 4000000000000 bytes
+ type: int
+ vendorID:
+ description:
+ - A vendor ID for the Pool. E.g. an ID allocated by a vendor service for the Pool.
+ - can be used with pool create, update operations
+ - must be unique
+ type: str
+ from_name:
+ description:
+    - rename the existing pool name (the human readable name of the Pool)
+ - I(from_name) is the existing name, and I(name) the new name
+ - can be used with update operation
+ type: str
+'''
+
+EXAMPLES = """
+- name: Create a new Pool
+ aws_netapp_cvs_pool:
+ state: present
+ name: TestPoolBB12
+ serviceLevel: extreme
+ sizeInBytes: 4000000000000
+ vendorID: ansiblePoolTestVendorBB12
+ region: us-east-1
+ api_url: cds-aws-bundles.netapp.com
+ api_key: MyAPiKey
+ secret_key: MySecretKey
+
+- name: Delete a Pool
+ aws_netapp_cvs_pool:
+ state: absent
+ name: TestPoolBB7
+ region: us-east-1
+ api_url: cds-aws-bundles.netapp.com
+ api_key: MyAPiKey
+ secret_key: MySecretKey
+
+- name: Update a Pool
+ aws_netapp_cvs_pool:
+ state: present
+ from_name: TestPoolBB12
+ name: Mynewpool7
+ vendorID: ansibleVendorMynewpool15
+ serviceLevel: extreme
+ sizeInBytes: 4000000000000
+ region: us-east-1
+ api_url: cds-aws-bundles.netapp.com
+ api_key: MyAPiKey
+ secret_key: MySecretKey
+
+"""
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI
+
+
+class NetAppAWSCVS(object):
+ '''Class for Pool operations '''
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+ """
+ self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ region=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ serviceLevel=dict(required=False, choices=['basic', 'standard', 'extreme'], type='str'),
+ sizeInBytes=dict(required=False, type='int'),
+ vendorID=dict(required=False, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = AwsCvsRestAPI(self.module)
+ self.sizeinbytes_min_value = 4000000000000
+
+ def get_aws_netapp_cvs_pool(self, name=None):
+ """
+ Returns Pool object if exists else Return None
+ """
+ pool_info = None
+
+ if name is None:
+ name = self.parameters['name']
+
+ pools, error = self.rest_api.get('Pools')
+
+ if error is None and pools is not None:
+ for pool in pools:
+ if 'name' in pool and pool['region'] == self.parameters['region']:
+ if pool['name'] == name:
+ pool_info = pool
+ break
+
+ return pool_info
+
+ def create_aws_netapp_cvs_pool(self):
+ """
+ Create a pool
+ """
+ api = 'Pools'
+
+ for key in ['serviceLevel', 'sizeInBytes', 'vendorID']:
+ if key not in self.parameters.keys() or self.parameters[key] is None:
+ self.module.fail_json(changed=False, msg="Mandatory key '%s' required" % (key))
+
+ pool = {
+ "name": self.parameters['name'],
+ "region": self.parameters['region'],
+ "serviceLevel": self.parameters['serviceLevel'],
+ "sizeInBytes": self.parameters['sizeInBytes'],
+ "vendorID": self.parameters['vendorID']
+ }
+
+ dummy, error = self.rest_api.post(api, pool)
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def update_aws_netapp_cvs_pool(self, update_pool_info, pool_id):
+ """
+ Update a pool
+ """
+ api = 'Pools/' + pool_id
+
+ pool = {
+ "name": update_pool_info['name'],
+ "region": self.parameters['region'],
+ "serviceLevel": update_pool_info['serviceLevel'],
+ "sizeInBytes": update_pool_info['sizeInBytes'],
+ "vendorID": update_pool_info['vendorID']
+ }
+
+ dummy, error = self.rest_api.put(api, pool)
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def delete_aws_netapp_cvs_pool(self, pool_id):
+ """
+ Delete a pool
+ """
+ api = 'Pools/' + pool_id
+ data = None
+ dummy, error = self.rest_api.delete(api, data)
+
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def apply(self):
+ """
+ Perform pre-checks, call functions and exit
+ """
+ update_required = False
+ cd_action = None
+
+ if 'sizeInBytes' in self.parameters.keys() and self.parameters['sizeInBytes'] < self.sizeinbytes_min_value:
+ self.module.fail_json(changed=False, msg="sizeInBytes should be greater than or equal to %d" % (self.sizeinbytes_min_value))
+
+ current = self.get_aws_netapp_cvs_pool()
+ if self.parameters.get('from_name'):
+ existing = self.get_aws_netapp_cvs_pool(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(existing, current)
+ if rename is None:
+ self.module.fail_json(changed=False, msg="unable to rename pool: '%s' does not exist" % self.parameters['from_name'])
+ if rename:
+ current = existing
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if cd_action is None and self.parameters['state'] == 'present':
+ keys_to_check = ['name', 'vendorID', 'sizeInBytes', 'serviceLevel']
+ update_pool_info, update_required = self.na_helper.compare_and_update_values(current, self.parameters, keys_to_check)
+
+ if update_required is True:
+ self.na_helper.changed = True
+ cd_action = 'update'
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'update':
+ self.update_aws_netapp_cvs_pool(update_pool_info, current['poolId'])
+ elif cd_action == 'create':
+ self.create_aws_netapp_cvs_pool()
+ elif cd_action == 'delete':
+ self.delete_aws_netapp_cvs_pool(current['poolId'])
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Main Function'''
+ aws_cvs_netapp_pool = NetAppAWSCVS()
+ aws_cvs_netapp_pool.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py
new file mode 100644
index 00000000..fa5c5f87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""AWS Cloud Volumes Services - Manage Snapshots"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: aws_netapp_cvs_snapshots
+
+short_description: NetApp AWS Cloud Volumes Service Manage Snapshots.
+extends_documentation_fragment:
+ - netapp.aws.netapp.awscvs
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, Update, Delete Snapshot on AWS Cloud Volumes Service.
+
+options:
+ state:
+ description:
+ - Whether the specified snapshot should exist or not.
+ required: true
+ type: str
+ choices: ['present', 'absent']
+
+ region:
+ description:
+    - The region to which the snapshot belongs.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Name of the snapshot
+ required: true
+ type: str
+
+ fileSystemId:
+ description:
+ - Name or Id of the filesystem.
+ - Required for create operation
+ type: str
+
+ from_name:
+ description:
+ - ID or Name of the snapshot to rename.
+    - Required to create a snapshot called 'name' by renaming 'from_name'.
+ type: str
+'''
+
+EXAMPLES = """
+- name: Create Snapshot
+ aws_netapp_cvs_snapshots:
+ state: present
+ region: us-east-1
+ name: testSnapshot
+ fileSystemId: testVolume
+ api_url : cds-aws-bundles.netapp.com
+ api_key: myApiKey
+ secret_key : mySecretKey
+
+- name: Update Snapshot
+ aws_netapp_cvs_snapshots:
+ state: present
+ region: us-east-1
+ name: testSnapshot - renamed
+ from_name: testSnapshot
+ fileSystemId: testVolume
+ api_url : cds-aws-bundles.netapp.com
+ api_key: myApiKey
+ secret_key : mySecretKey
+
+- name: Delete Snapshot
+ aws_netapp_cvs_snapshots:
+ state: absent
+ region: us-east-1
+ name: testSnapshot
+ api_url : cds-aws-bundles.netapp.com
+ api_key: myApiKey
+ secret_key : mySecretKey
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI
+
+
+class AwsCvsNetappSnapshot(object):
+ """
+ Contains methods to parse arguments,
+ derive details of AWS_CVS objects
+ and send requests to AWS CVS via
+ the restApi
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure the requests module is installed
+ """
+ self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ region=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ fileSystemId=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['fileSystemId']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Calling generic AWSCVS restApi class
+ self.rest_api = AwsCvsRestAPI(self.module)
+
+        # Check the parameters passed and create a new parameters list
+ self.data = {}
+ for key in self.parameters.keys():
+ self.data[key] = self.parameters[key]
+
+ def get_snapshot_id(self, name):
+ # Check if snapshot exists
+        # Return snapshotId if the Snapshot is found, None otherwise
+ list_snapshots, error = self.rest_api.get('Snapshots')
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ for snapshot in list_snapshots:
+ if snapshot['name'] == name:
+ return snapshot['snapshotId']
+ return None
+
+ def get_filesystem_id(self):
+        # Check if the given FileSystem exists
+        # Return fileSystemId if found, None otherwise
+ list_filesystem, error = self.rest_api.get('FileSystems')
+
+ if error:
+ self.module.fail_json(msg=error)
+ for filesystem in list_filesystem:
+ if filesystem['fileSystemId'] == self.parameters['fileSystemId']:
+ return filesystem['fileSystemId']
+ elif filesystem['creationToken'] == self.parameters['fileSystemId']:
+ return filesystem['fileSystemId']
+ return None
+
+ def create_snapshot(self):
+ # Create Snapshot
+ api = 'Snapshots'
+ dummy, error = self.rest_api.post(api, self.data)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def rename_snapshot(self, snapshot_id):
+ # Rename Snapshot
+ api = 'Snapshots/' + snapshot_id
+ dummy, error = self.rest_api.put(api, self.data)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def delete_snapshot(self, snapshot_id):
+ # Delete Snapshot
+ api = 'Snapshots/' + snapshot_id
+ dummy, error = self.rest_api.delete(api, self.data)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def apply(self):
+ """
+ Perform pre-checks, call functions and exit
+ """
+ self.snapshot_id = self.get_snapshot_id(self.data['name'])
+
+ if self.snapshot_id is None and 'fileSystemId' in self.data:
+            self.filesystem_id = self.get_filesystem_id()
+            if self.filesystem_id is None:
+                self.module.fail_json(msg='Error: Specified filesystem id %s does not exist ' % self.parameters['fileSystemId'])
+            self.data['fileSystemId'] = self.filesystem_id
+
+ cd_action = self.na_helper.get_cd_action(self.snapshot_id, self.data)
+ result_message = ""
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ # Skip changes
+ result_message = "Check mode, skipping changes"
+ else:
+ if cd_action == "delete":
+ self.delete_snapshot(self.snapshot_id)
+ result_message = "Snapshot Deleted"
+
+ elif cd_action == "create":
+ if 'from_name' in self.data:
+                        # If cd_action is create and from_name is given
+ snapshot_id = self.get_snapshot_id(self.data['from_name'])
+ if snapshot_id is not None:
+ # If resource pointed by from_name exists, rename the snapshot to name
+ self.rename_snapshot(snapshot_id)
+ result_message = "Snapshot Updated"
+ else:
+ # If resource pointed by from_name does not exists, error out
+                            # If the resource pointed to by from_name does not exist, error out
+ else:
+ self.create_snapshot()
+ # If from_name is not defined, Create from scratch.
+ result_message = "Snapshot Created"
+
+ self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ aws_netapp_cvs_snapshots = AwsCvsNetappSnapshot()
+ aws_netapp_cvs_snapshots.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/builtins.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..f60ee678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+ # If there wasn't an extra newline by itself, then the file being
+ # emulated doesn't have a newline to end the last line remove the
+            # emulated doesn't have a newline to end the last line; remove the
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines` methods
+        of the file handle to return.  This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py
new file mode 100644
index 00000000..abcc3fbd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py
@@ -0,0 +1,111 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests AWS CVS ActiveDirectory Ansible module: aws_netapp_cvs_active_directory '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.aws.tests.unit.compat import unittest
+from ansible_collections.netapp.aws.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_active_directory \
+ import AwsCvsNetappActiveDir as ad_module
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_fail_check(self):
+ return dict({
+ 'state': 'present',
+ 'DNS': '101.102.103.123',
+ 'domain': 'mydomain.com',
+ 'password': 'netapp1!',
+ 'username': 'myuser',
+ 'api_url': 'myapiurl.com',
+ 'secret_key': 'mysecretkey',
+ 'api_key': 'myapikey'
+ })
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'DNS': '101.102.103.123',
+ 'domain': 'mydomain.com',
+ 'password': 'netapp1!',
+ 'region': 'us-east-1',
+ 'netBIOS': 'testing',
+ 'username': 'myuser',
+ 'api_url': 'myapiurl.com',
+ 'secret_key': 'mysecretkey',
+ 'api_key': 'myapikey'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(self.set_default_args_fail_check())
+ ad_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_module_fail_when_required_args_present(self):
+        ''' required arguments are present '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ ad_module()
+            exit_json(changed=True, msg="TestCase Fail when required args are present")
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_active_directory.AwsCvsNetappActiveDir.get_activedirectory_id')
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_active_directory.AwsCvsNetappActiveDir.get_activedirectory')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
+ def test_create_aws_netapp_cvs_activedir(self, get_post_api, get_aws_api, get_ad_id):
+ set_module_args(self.set_default_args_pass_check())
+ my_obj = ad_module()
+
+ get_aws_api.return_value = None
+ get_post_api.return_value = None, None
+ get_ad_id.return_value = "123"
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_aws_netapp_cvs_active_directory: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py
new file mode 100644
index 00000000..9712db10
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py
@@ -0,0 +1,148 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests AWS CVS FileSystems Ansible module: aws_netapp_cvs_filesystems'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.aws.tests.unit.compat import unittest
+from ansible_collections.netapp.aws.tests.unit.compat.mock import patch
+from ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_filesystems \
+ import AwsCvsNetappFileSystem as fileSystem_module
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_fail_check(self):
+ return dict({
+ 'creationToken': 'TestFilesystem',
+ 'region': 'us-east-1',
+ 'quotaInBytes': 3424,
+ 'serviceLevel': 'standard',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'creationToken': 'TestFilesystem',
+ 'region': 'us-east-1',
+ 'quotaInBytes': 3424,
+ 'serviceLevel': 'standard',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_create_aws_netapp_cvs_filesystems(self):
+ return dict({
+ 'state': 'present',
+ 'creationToken': 'TestFilesystem',
+ 'region': 'us-east-1',
+ 'quotaInBytes': 3424,
+ 'serviceLevel': 'standard',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_delete_aws_netapp_cvs_filesystems(self):
+ return dict({
+ 'state': 'absent',
+ 'creationToken': 'TestFilesystem',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(self.set_default_args_fail_check())
+ fileSystem_module()
+ print('Info: test_module_fail_when_required_args_missing: %s' % exc.value.args[0]['msg'])
+
+ def test_module_pass_when_required_args_present(self):
+ ''' required arguments are present '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ fileSystem_module()
+ exit_json(changed=True, msg="Induced arguments check")
+ print('Info: test_module_pass_when_required_args_present: %s' % exc.value.args[0]['msg'])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_filesystems.AwsCvsNetappFileSystem.get_filesystem_id')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.get_state')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
+ def test_create_aws_netapp_cvs_filesystems_pass(self, get_post_api, get_state_api, get_filesystem_id):
+ set_module_args(self.set_args_create_aws_netapp_cvs_filesystems())
+ my_obj = fileSystem_module()
+ get_filesystem_id.return_value = None
+ get_state_api.return_value = 'done'
+ response = {'jobs': [{'jobId': 'dummy'}]}
+ get_post_api.return_value = response, None
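+ # With post mocked to return a dummy job and get_state mocked to 'done', apply() should treat the create as completed and report a change.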
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_aws_netapp_cvs_filesystems_pass: %s' % repr(exc.value.args[0]))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_filesystems.AwsCvsNetappFileSystem.get_filesystem_id')
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_filesystems.AwsCvsNetappFileSystem.get_filesystem')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.get_state')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.delete')
+ def test_delete_aws_netapp_cvs_filesystems_pass(self, get_delete_api, get_state_api, get_filesystem, get_filesystem_id):
+ set_module_args(self.set_args_delete_aws_netapp_cvs_filesystems())
+ my_obj = fileSystem_module()
+ get_filesystem_id.return_value = '432-432-532423-4232'
+ get_filesystem.return_value = 'dummy'
+ get_state_api.return_value = 'done'
+ response = {'jobs': [{'jobId': 'dummy'}]}
+ get_delete_api.return_value = response, None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_aws_netapp_cvs_filesystems_pass: %s' % repr(exc.value.args[0]))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py
new file mode 100644
index 00000000..68d038db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py
@@ -0,0 +1,251 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' Unit tests for AWS Cloud Volumes Services - Manage Pools '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.aws.tests.unit.compat import unittest
+from ansible_collections.netapp.aws.tests.unit.compat.mock import patch
+from ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool \
+ import NetAppAWSCVS as pool_module
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_fail_check(self):
+ return dict({
+ 'from_name': 'TestPoolAA',
+ 'name': 'TestPoolAA_new',
+ 'serviceLevel': 'standard',
+ 'sizeInBytes': 4000000000000,
+ 'vendorID': 'ansiblePoolTestVendorA',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.invalid',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'from_name': 'TestPoolAA',
+ 'name': 'TestPoolAA_new',
+ 'serviceLevel': 'standard',
+ 'sizeInBytes': 4000000000000,
+ 'vendorID': 'ansiblePoolTestVendorA',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.invalid',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_create_aws_netapp_cvs_pool(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestPoolAA',
+ 'serviceLevel': 'standard',
+ 'sizeInBytes': 4000000000000,
+ 'vendorID': 'ansiblePoolTestVendorA',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.invalid',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_update_aws_netapp_cvs_pool(self):
+ return dict({
+ 'state': 'present',
+ 'from_name': 'TestPoolAA',
+ 'name': 'TestPoolAA_new',
+ 'serviceLevel': 'standard',
+ 'sizeInBytes': 4000000000000,
+ 'vendorID': 'ansiblePoolTestVendorA',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.invalid',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_delete_aws_netapp_cvs_pool(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'TestPoolAA',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.invalid',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(self.set_default_args_fail_check())
+ pool_module()
+ print('Info: test_module_fail_when_required_args_missing: %s' % exc.value.args[0]['msg'])
+
+ def test_module_pass_when_required_args_present(self):
+ ''' required arguments are present '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ pool_module()
+ exit_json(changed=True, msg="Induced arguments check")
+ print('Info: test_module_pass_when_required_args_present: %s' % exc.value.args[0]['msg'])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.put')
+ def test_update_aws_netapp_cvs_pool_pass(self, get_put_api, get_aws_api):
+ set_module_args(self.set_args_update_aws_netapp_cvs_pool())
+ my_obj = pool_module()
+ my_pool = {
+ "name": "Dummyname",
+ "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975",
+ "region": "us-east-1",
+ "serviceLevel": "extreme",
+ "sizeInBytes": 40000000000000000,
+ "state": "available",
+ "vendorID": "Dummy"
+ }
+ get_aws_api.return_value = my_pool
+ get_put_api.return_value = my_pool, None
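+ # The from_name/name arguments appear to request a pool rename; with an existing pool and an error-free PUT, apply() should report a change.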
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_update_aws_netapp_cvs_pool_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.put')
+ def test_update_aws_netapp_cvs_pool_fail(self, get_put_api, get_aws_api):
+ set_module_args(self.set_args_update_aws_netapp_cvs_pool())
+ my_obj = pool_module()
+ my_pool = {
+ "name": "Dummyname",
+ "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975",
+ "region": "us-east-1",
+ "serviceLevel": "extreme",
+ "sizeInBytes": 40000000000000000,
+ "state": "available",
+ "vendorID": "Dummy"
+ }
+ get_put_api.return_value = my_pool, "Error"
+ get_aws_api.return_value = my_pool
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Info: test_update_aws_netapp_cvs_pool_fail: %s' % repr(exc.value))
+ assert exc.value.args[0]['msg'] is not None
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
+ def test_create_aws_netapp_cvs_pool_pass(self, get_post_api, get_aws_api):
+ set_module_args(self.set_args_create_aws_netapp_cvs_pool())
+ my_obj = pool_module()
+ get_aws_api.return_value = None
+ get_post_api.return_value = None, None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_aws_netapp_cvs_pool_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
+ def test_create_aws_netapp_cvs_pool_fail(self, get_post_api, get_aws_api):
+ set_module_args(self.set_args_create_aws_netapp_cvs_pool())
+ my_obj = pool_module()
+ my_pool = {
+ "name": "Dummyname",
+ "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975",
+ "region": "us-east-1",
+ "serviceLevel": "extreme",
+ "sizeInBytes": 40000000000000000,
+ "state": "available",
+ "vendorID": "Dummy"
+ }
+ get_post_api.return_value = my_pool, "Error"
+ get_aws_api.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_aws_netapp_cvs_pool_fail: %s' % repr(exc.value))
+ assert exc.value.args[0]['msg'] is not None
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.delete')
+ def test_delete_aws_netapp_cvs_pool_pass(self, get_delete_api, get_aws_api):
+ set_module_args(self.set_args_delete_aws_netapp_cvs_pool())
+ my_obj = pool_module()
+ my_pool = {
+ "name": "Dummyname",
+ "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975",
+ "region": "us-east-1",
+ "serviceLevel": "extreme",
+ "sizeInBytes": 40000000000000000,
+ "state": "available",
+ "vendorID": "Dummy"
+ }
+ get_aws_api.return_value = my_pool
+ get_delete_api.return_value = None, None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_aws_netapp_cvs_pool_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.delete')
+ def test_delete_aws_netapp_cvs_pool_fail(self, get_delete_api, get_aws_api):
+ set_module_args(self.set_args_delete_aws_netapp_cvs_pool())
+ my_obj = pool_module()
+ my_pool = {
+ "name": "Dummyname",
+ "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975",
+ "region": "us-east-1",
+ "serviceLevel": "extreme",
+ "sizeInBytes": 40000000000000000,
+ "state": "available",
+ "vendorID": "Dummy"
+ }
+ get_delete_api.return_value = my_pool, "Error"
+ get_aws_api.return_value = my_pool
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_aws_netapp_cvs_pool_fail: %s' % repr(exc.value))
+ assert exc.value.args[0]['msg'] is not None
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py
new file mode 100644
index 00000000..1f4c4bbe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py
@@ -0,0 +1,140 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests NetApp AWS CVS Snapshots Ansible module: aws_netapp_cvs_snapshots'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.aws.tests.unit.compat import unittest
+from ansible_collections.netapp.aws.tests.unit.compat.mock import patch
+from ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_snapshots \
+ import AwsCvsNetappSnapshot as snapshot_module
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_fail_check(self):
+ return dict({
+ 'name': 'TestFilesystem',
+ 'fileSystemId': 'standard',
+ 'from_name': 'from_TestFilesystem',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'testSnapshot',
+ 'fileSystemId': 'standard',
+ 'from_name': 'from_TestFilesystem',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_create_aws_netapp_cvs_snapshots(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'testSnapshot',
+ 'fileSystemId': '123-4213-432-432',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def set_args_delete_aws_netapp_cvs_snapshots(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'testSnapshot',
+ 'region': 'us-east-1',
+ 'api_url': 'hostname.com',
+ 'api_key': 'myapikey',
+ 'secret_key': 'mysecretkey'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(self.set_default_args_fail_check())
+ snapshot_module()
+ print('Info: test_module_fail_when_required_args_missing: %s' % exc.value.args[0]['msg'])
+
+ def test_module_pass_when_required_args_present(self):
+ ''' required arguments are present '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ snapshot_module()
+ exit_json(changed=True, msg="Induced arguments check")
+ print('Info: test_module_pass_when_required_args_present: %s' % exc.value.args[0]['msg'])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_snapshots.AwsCvsNetappSnapshot.get_snapshot_id')
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_snapshots.AwsCvsNetappSnapshot.get_filesystem_id')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
+ def test_create_aws_netapp_cvs_snapshots_pass(self, get_post_api, get_filesystem_id, get_snapshot_id):
+ set_module_args(self.set_args_create_aws_netapp_cvs_snapshots())
+ my_obj = snapshot_module()
+ get_filesystem_id.return_value = 'filesystemName'
+ get_snapshot_id.return_value = None
+ get_post_api.return_value = None, None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_aws_netapp_cvs_snapshots_pass: %s' % repr(exc.value.args[0]))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_snapshots.AwsCvsNetappSnapshot.get_snapshot_id')
+ @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.delete')
+ def test_delete_aws_netapp_cvs_snapshots_pass(self, get_delete_api, get_snapshot_id):
+ set_module_args(self.set_args_delete_aws_netapp_cvs_snapshots())
+ my_obj = snapshot_module()
+ get_snapshot_id.return_value = "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975"
+ get_delete_api.return_value = None, None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_aws_netapp_cvs_snapshots_pass: %s' % repr(exc.value.args[0]))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/requirements.txt
new file mode 100644
index 00000000..46fbfa46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/aws/tests/unit/requirements.txt
@@ -0,0 +1 @@
+unittest2 ; python_version < '2.7'
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/CHANGELOG.rst b/collections-debian-merged/ansible_collections/netapp/elementsw/CHANGELOG.rst
new file mode 100644
index 00000000..77214063
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/CHANGELOG.rst
@@ -0,0 +1,147 @@
+=========================================
+NetApp ElementSW Collection Release Notes
+=========================================
+
+.. contents:: Topics
+
+
+v20.10.0
+========
+
+Minor Changes
+-------------
+
+- na_elementsw_cluster - add new options ``encryption``, ``order_number``, and ``serial_number``.
+- na_elementsw_network_interfaces - make all options not required, so that only bond_1g can be set for example.
+- na_elementsw_network_interfaces - restructure options into 2 dictionaries ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow all older options.
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_info - NetApp Element Software Info
+
+v20.9.1
+=======
+
+Bugfixes
+--------
+
+- na_elementsw_node - improve error reporting when cluster name cannot be set because node is already active.
+- na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back
+
+v20.9.0
+=======
+
+Minor Changes
+-------------
+
+- na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes.
+- na_elementsw_node - ``preset_only`` to only set the cluster name before creating a cluster with na_elementsw_cluster.
+- na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or ID.
+
+Bugfixes
+--------
+
+- na_elementsw_node - fix check_mode so that no action is taken.
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_qos_policy - NetApp Element Software create/modify/rename/delete QOS Policy
+
+v20.8.0
+=======
+
+Minor Changes
+-------------
+
+- add "required:true" where missing.
+- add "type:str" (or int, dict) where missing in documentation section.
+- na_elementsw_drive - add all drives in a cluster, allow for a list of nodes or a list of drives.
+- remove "required:true" for state and use present as default.
+- use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+
+Bugfixes
+--------
+
+- na_elementsw_access_group - fix check_mode so that no action is taken.
+- na_elementsw_admin_users - fix check_mode so that no action is taken.
+- na_elementsw_cluster - create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+- na_elementsw_cluster_snmp - double exception because of AttributeError.
+- na_elementsw_drive - node_id or drive_id were not handled properly when using numeric ids.
+- na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+- na_elementsw_ldap - double exception because of AttributeError.
+- na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), remove default values and fix documentation.
+- na_elementsw_vlan - AttributeError if VLAN already exists.
+- na_elementsw_vlan - change in attributes was ignored.
+- na_elementsw_vlan - fix check_mode so that no action is taken.
+- na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
+- na_elementsw_volume - double exception because of AttributeError.
+
+v20.6.0
+=======
+
+Bugfixes
+--------
+
+- galaxy.yml - fix repository and homepage links.
+
+v20.2.0
+=======
+
+Bugfixes
+--------
+
+- galaxy.yml - fix path to github repository.
+- netapp.py - report error in case of connection error rather than raising a generic exception by default.
+
+v20.1.0
+=======
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_access_group_volumes - NetApp Element Software Add/Remove Volumes to/from Access Group
+
+v19.10.0
+========
+
+Minor Changes
+-------------
+
+- refactor existing modules as a collection
+
+v2.8.0
+======
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_cluster_config - Configure Element SW Cluster
+- netapp.elementsw.na_elementsw_cluster_snmp - Configure Element SW Cluster SNMP
+- netapp.elementsw.na_elementsw_initiators - Manage Element SW initiators
+
+v2.7.0
+======
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_access_group - NetApp Element Software Manage Access Groups
+- netapp.elementsw.na_elementsw_account - NetApp Element Software Manage Accounts
+- netapp.elementsw.na_elementsw_admin_users - NetApp Element Software Manage Admin Users
+- netapp.elementsw.na_elementsw_backup - NetApp Element Software Create Backups
+- netapp.elementsw.na_elementsw_check_connections - NetApp Element Software Check connectivity to MVIP and SVIP.
+- netapp.elementsw.na_elementsw_cluster - NetApp Element Software Create Cluster
+- netapp.elementsw.na_elementsw_cluster_pair - NetApp Element Software Manage Cluster Pair
+- netapp.elementsw.na_elementsw_drive - NetApp Element Software Manage Node Drives
+- netapp.elementsw.na_elementsw_ldap - NetApp Element Software Manage ldap admin users
+- netapp.elementsw.na_elementsw_network_interfaces - NetApp Element Software Configure Node Network Interfaces
+- netapp.elementsw.na_elementsw_node - NetApp Element Software Node Operation
+- netapp.elementsw.na_elementsw_snapshot - NetApp Element Software Manage Snapshots
+- netapp.elementsw.na_elementsw_snapshot_restore - NetApp Element Software Restore Snapshot
+- netapp.elementsw.na_elementsw_snapshot_schedule - NetApp Element Software Snapshot Schedules
+- netapp.elementsw.na_elementsw_vlan - NetApp Element Software Manage VLAN
+- netapp.elementsw.na_elementsw_volume - NetApp Element Software Manage Volumes
+- netapp.elementsw.na_elementsw_volume_clone - NetApp Element Software Create Volume Clone
+- netapp.elementsw.na_elementsw_volume_pair - NetApp Element Software Volume Pair
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/FILES.json b/collections-debian-merged/ansible_collections/netapp/elementsw/FILES.json
new file mode 100644
index 00000000..4cdde84a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/FILES.json
@@ -0,0 +1,551 @@
+{
+ "files": [
+ {
+ "format": 1,
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": ".",
+ "chksum_type": null
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/doc_fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fd42778f85cd3b989604d0227af4cc90350d94f5864938eb0bd29cf7a66401c3",
+ "name": "plugins/doc_fragments/netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/module_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9e4e50f70b91fb0e257515b995e529e639db14d0bcb1b170401f85fe7e238afa",
+ "name": "plugins/module_utils/netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a98ea2d0aec17e10c6b5a956cfaa1dcddbd336b674079a1f86e85429381a49e7",
+ "name": "plugins/module_utils/netapp_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "33132c95ba546d56bf953e1613dd39ad8a258379b3a32120f7be8b19e2c0d8a2",
+ "name": "plugins/module_utils/netapp_elementsw_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4a0e280ee9ef13b994f98c848524dc53b3a3a16559e3d1e22be6573272327c8c",
+ "name": "plugins/modules/na_elementsw_initiators.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c03c3451de6afd8005c100d3d1f1b7cad55a68543650af9adf457adb130235b4",
+ "name": "plugins/modules/na_elementsw_qos_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1ee85a0b9e6ac2b0151a52b7722a43ea3e358d48f48816f5fac597151fd58d93",
+ "name": "plugins/modules/na_elementsw_cluster_snmp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "25b0f4b869b1b814160da50df5b7b06d0e5d3eb83ca8887a0fead337699d6c62",
+ "name": "plugins/modules/na_elementsw_snapshot.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a4b329b6f3c13f500a95ad0fb40eba4db5873b78b0c137997c858229336011af",
+ "name": "plugins/modules/na_elementsw_volume.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "532fbf39ed0ee98af0e9323f037ab0e0f52d5eac9179a82eeb169a5a48cdfd3e",
+ "name": "plugins/modules/na_elementsw_access_group_volumes.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1fa2d61732ffb840991320b1a176bbf0fee9cbed3dda1027f6b3c3adb7f82ebf",
+ "name": "plugins/modules/na_elementsw_snapshot_schedule.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6882747383c770c6ec43585e3a4db0081c8de165415d40941532324208e3aa4e",
+ "name": "plugins/modules/na_elementsw_node.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7099bfffb1ec35ed7c0a40c0708cb4d1d79f6267b16fcc71f759796add15edaf",
+ "name": "plugins/modules/na_elementsw_access_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ddd54266eb0a3ebf891d8c1310059b40cfbad7679db3d7f2b9c600baf31e42ca",
+ "name": "plugins/modules/na_elementsw_cluster_pair.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ead937f30287dfd02521b4fdda1e0a128cd1d3ba8db4a721330ff4bbfb76e284",
+ "name": "plugins/modules/na_elementsw_volume_pair.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6dc94b752a4931e30ea169f61aec3919a7cd7636ce3aeff4764094d2adc355f7",
+ "name": "plugins/modules/na_elementsw_cluster_config.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d906dcdfe52a88b0458c7fc3abcf77ef0bc0a7640db1aedf6b4071f9bae106f2",
+ "name": "plugins/modules/na_elementsw_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8b8a59c8c45c1aa147c2d90b01654135f31ac4a1e31c643ce3b07007d6f28ea9",
+ "name": "plugins/modules/na_elementsw_ldap.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "39414c4cb613271d96220d275f027404e41e4b5dd61db5c7ad6eb3f70bc3243b",
+ "name": "plugins/modules/na_elementsw_vlan.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d42be06f947c782d42fdd9141daeb87374855fc996ecfc53a450e20216cc6e05",
+ "name": "plugins/modules/na_elementsw_cluster.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "05f518bb36b88476c0a6dc329587400937c88c64bb335bd0f3ad279c79cf845e",
+ "name": "plugins/modules/na_elementsw_volume_clone.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "54458477eb0807256e663f64924d88cf5a5cb8058c0e7212a155a4aff9f87997",
+ "name": "plugins/modules/na_elementsw_check_connections.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f90daeab34ed6f8225de664c3597302910ab15e0c4a0e3d6c930d40ffbd8afab",
+ "name": "plugins/modules/na_elementsw_drive.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7dbfc7b05e3c69ebbb1723314094d62e07a4b328cba09db899808fd50d38bc15",
+ "name": "plugins/modules/na_elementsw_account.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0d70395bc1a83498c08081aaa31fa4e5bb8ebfccbc03b7c9f1cb0aa6a4d132c9",
+ "name": "plugins/modules/na_elementsw_snapshot_restore.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b545334782c314c7c2c8e857f85838859b461176369ed002f3fba7414062b809",
+ "name": "plugins/modules/na_elementsw_backup.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d045d9768f1b469c3aeda533dbfdcbdb5a2f51a2d9949c59a3f73b56959ca082",
+ "name": "plugins/modules/na_elementsw_network_interfaces.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b822e729b9e40361b148fd9739fddf1c26705597a092b5d967e29676eed9fb66",
+ "name": "plugins/modules/na_elementsw_admin_users.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/compat",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096",
+ "name": "tests/unit/compat/unittest.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
+ "name": "tests/unit/compat/builtins.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "tests/unit/compat/__init__.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "name": "tests/unit/compat/mock.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "610e61c50aa92ab89a2923d278f77e3425a214e4b40d3f32f6500c052caa8950",
+ "name": "tests/unit/requirements.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "66d9f46f9b572b24f6465f43d2aebfb43f3fe2858ad528472559ba089dc2fb3c",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_cluster.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f6aa0100e51bbe54b6e9edeb072b7de526542e55da1cede0d1ae5f4367ec89eb",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d910be3c377edddb04f6f74c3e4908a9d6d32c71ec251cf74e9eaa6711b1bffe",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_volume.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9390907ec097add3aa2d936dd95f63d05bfac2b5b730ae12df50d14c5a18e0c1",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_vlan.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b563b9adab2f4c7a67354fa2b7a2e3468cf68b041ba51c788e0e082e4b50b7ba",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_nodes.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ae4c8e648a16dfa704964ef0f3782ea27adec2f1c0ceb5fca84ab86e888caffa",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_cluster_config.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "189242c5691fba4c436403cbfeb512fdab01c8bd35b028d7262b4cdeca9c7376",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_qos_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5002081bc3177a94e5b2911259138ba80b2cf03006c6333c78cc50731f89fbbe",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_account.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f5cc8b59e5120ff8f6b51a9b2085d336f63c5b91d7d3f21db629176c92c2f011",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_initiators.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "655c454425b97c72bb924b5def11e8dc65dd9dc4cd40cf00df66ae85120ba40f",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fb1802b2cd87193966ccc7d8b0c6c94522d7954bfada73febb8aeae77367322c",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_template.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "489f21207a0de4f7ab263096c0f2d2c674cb9a334b45edb76165f7a933b13c5e",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4682bf1c6d258032a9a9b001254246a2993e006ab2aa32463e42bed5e192e09f",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_access_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0899affdd61dee67e349b33c5084d2dd632632c2ea01ad196267b67218444a71",
+ "name": "tests/unit/plugins/modules/test_na_elementsw_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins/modules_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a40d8651793b9771d6f56d5e8b52772597a77e317002a9f9bf3400cffd014d60",
+ "name": "tests/unit/plugins/modules_utils/test_netapp_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs/fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "784b39c5d9440affb1dbab3ba8769ec1e88e7570798448c238a77d32dbf6e505",
+ "name": "changelogs/fragments/DEVOPS-3324.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "56bed0aab9696af7068eb1bb743eb316ab23c3200ac6faa715a303e5f33f0973",
+ "name": "changelogs/fragments/20.9.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "94573d6e6ddde5f8a053d72a7e49d87d13c4274f5ea5c24c6c0a95947215977b",
+ "name": "changelogs/fragments/DEVOPS-3196.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7b1a5ef7df5f1e6e66ddc013149aea0480eb79f911a0563e2e6d7d9af79d5572",
+ "name": "changelogs/fragments/2019.10.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7cfc4addbf3343a3ce121f5de6cc2cc8244ad7b62a7429c2694543dabc2a8ccf",
+ "name": "changelogs/fragments/DEVOPS-3174.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2c98764e792ed6c6d9cee6df80b9fff8f4fcadaf765c0aa0f0ed3dd5e3080fec",
+ "name": "changelogs/fragments/20.2.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "242f770eafb49994810a3263e23e1d342aeb36396819045c48f491810aab6908",
+ "name": "changelogs/fragments/DEVOPS-3117.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8132aa931d13a49ba1a3c0fee131c048c6767ce17b3d9cabafa7e34f3c7c239a",
+ "name": "changelogs/fragments/DEVOPS-3310.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "cddb1135b1c15ca3c8f130bcc439d73ac819c7a3e0472c9ff358c75405bd8cb3",
+ "name": "changelogs/fragments/DEVOPS-3235.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b13007f7b14dd35357ec0fb06b0e89cf5fee56036b0a6004dfb21c46010cb7c1",
+ "name": "changelogs/fragments/20.8.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0efa05e4cca58b1bfe30a60673adc266e7598d841065486b5b29c7e7a8b29bf4",
+ "name": "changelogs/fragments/DEVOPS-3188.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6192b3cccdc7c1e1eb0d61a49dd20c6f234499b6dd9b52b2f974b673e99f7a47",
+ "name": "changelogs/fragments/20.6.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "70f470630a3fb893540ad9060634bfd0955e4a3371ab1a921e44bdc6b5ea1ba5",
+ "name": "changelogs/config.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1ef0a26328d5885926828a2ed92b002171fcc1914922fc8314aea8f296607653",
+ "name": "changelogs/.plugin-cache.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "78fe42293830d0095236c8eae1d21a7411f81ef0580b2a05893b00c7b1c0f293",
+ "name": "changelogs/changelog.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5262da47e5e013fbae6c689c79c771e137a733084b9d7172f73a7b0c1c935623",
+ "name": "README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5810eefe77b0dac2ec501d77b67c2ea5a84868dc2286d745d9bb263a875aad1a",
+ "name": "CHANGELOG.rst",
+ "chksum_type": "sha256",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/MANIFEST.json b/collections-debian-merged/ansible_collections/netapp/elementsw/MANIFEST.json
new file mode 100644
index 00000000..92b313db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/MANIFEST.json
@@ -0,0 +1,32 @@
+{
+ "collection_info": {
+ "description": "Netapp ElementSW (Solidfire) Collection",
+ "repository": "https://github.com/ansible-collections/netapp",
+ "tags": [
+ "storage"
+ ],
+ "dependencies": {},
+ "authors": [
+ "NetApp Ansible Team <ng-ansibleteam@netapp.com>"
+ ],
+ "issues": null,
+ "name": "elementsw",
+ "license": [
+ "GPL-2.0-or-later"
+ ],
+ "documentation": null,
+ "namespace": "netapp",
+ "version": "20.11.0",
+ "readme": "README.md",
+ "license_file": null,
+ "homepage": "https://netapp.io/configuration-management-and-automation/"
+ },
+ "file_manifest_file": {
+ "format": 1,
+ "ftype": "file",
+ "chksum_sha256": "12fad5771045661b2b0cd7a3bde0b8a2048ae1f0d46510ef4cb53fa8f15eda22",
+ "name": "FILES.json",
+ "chksum_type": "sha256"
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/README.md b/collections-debian-merged/ansible_collections/netapp/elementsw/README.md
new file mode 100644
index 00000000..533b8ee5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/README.md
@@ -0,0 +1,102 @@
+netapp.elementSW
+
+NetApp ElementSW Collection
+
+Copyright (c) 2019 NetApp, Inc. All rights reserved.
+Specifications subject to change without notice.
+
+# Installation
+```bash
+ansible-galaxy collection install netapp.elementsw
+```
+To use this collection, add the following to the top of your playbook; without it you will be using the Ansible 2.9 version of the modules.
+```yaml
+collections:
+ - netapp.elementsw
+```
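+For example, a minimal playbook using the collection could look like the sketch below. The variable values are placeholders, and the exact parameter set is an assumption to be checked against the module documentation:
+```yaml
+- hosts: localhost
+  gather_facts: false
+  collections:
+    - netapp.elementsw
+  tasks:
+    - name: Check connectivity to MVIP and SVIP (illustrative values only)
+      na_elementsw_check_connections:
+        hostname: "{{ elementsw_mvip }}"
+        username: "{{ elementsw_username }}"
+        password: "{{ elementsw_password }}"
+```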
+# Need help
+Join our Slack Channel at [Netapp.io](http://netapp.io/slack)
+
+# Release Notes
+
+## 20.11.0
+
+### Minor changes
+- na_elementsw_snapshot_schedule - Add `retention` in examples.
+
+### Bug Fixes
+- na_elementsw_drive - Object of type 'dict_values' is not JSON serializable.
+
+## 20.10.0
+
+### New Modules
+- na_elementsw_info: support for two subsets `cluster_accounts`, `node_config`.
+
+### New Options
+- na_elementsw_cluster: `encryption` to enable encryption at rest. `order_number` and `serial_number` for demo purposes.
+- na_elementsw_network_interfaces: restructure options, into 2 dictionaries `bond_1g` and `bond_10g`, so that there is no shared option. Disallow all older options.
+- na_elementsw_network_interfaces: make all options not required, so that only bond_1g can be set for example.
+
+## 20.9.1
+
+### Bug Fixes
+- na_elementsw_node: improve error reporting when cluster name cannot be set because node is already active.
+- na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back
+
+## 20.9.0
+
+### New Modules
+- na_elementsw_qos_policy: create, modify, rename, or delete QOS policy.
+
+### New Options
+- na_elementsw_node: `cluster_name` to set the cluster name on new nodes.
+- na_elementsw_node: `preset_only` to only set the cluster name before creating a cluster with na_elementsw_cluster.
+- na_elementsw_volume: `qos_policy_name` to provide a QOS policy name or ID.
+
+### Bug Fixes
+- na_elementsw_node: fix check_mode so that no action is taken.
+
+## 20.8.0
+
+### New Options
+- na_elementsw_drive: add all drives in a cluster, allow for a list of nodes or a list of drives.
+
+### Bug Fixes
+- na_elementsw_access_group: fix check_mode so that no action is taken.
+- na_elementsw_admin_users: fix check_mode so that no action is taken.
+- na_elementsw_cluster: create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+- na_elementsw_cluster_snmp: double exception because of AttributeError.
+- na_elementsw_drive: node_id or drive_id were not handled properly when using numeric ids.
+- na_elementsw_initiators: volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+- na_elementsw_ldap: double exception because of AttributeError.
+- na_elementsw_snapshot_schedule: ignore schedules being deleted (idempotency), remove default values and fix documentation.
+- na_elementsw_vlan: AttributeError if VLAN already exists.
+- na_elementsw_vlan: fix check_mode so that no action is taken.
+- na_elementsw_vlan: change in attributes was ignored.
+- na_elementsw_volume: double exception because of AttributeError.
+- na_elementsw_volume: Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
+
+### Module documentation changes
+- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+- add type: str (or int, dict) where missing in documentation section.
+- add required: true where missing.
+- remove required: true for state and use present as default.
+
+## 20.6.0
+### Bug Fixes
+- galaxy.yml: fix repository and homepage links
+
+## 20.2.0
+### Bug Fixes
+- galaxy.yml: fix path to github repository.
+- netapp.py: report error in case of connection error rather than raising a generic exception by default.
+
+## 20.1.0
+### New Module
+- na_elementsw_access_group_volumes: add/remove volumes to/from existing access group
+
+## 19.11.0
+## 19.10.0
+Changes in 19.10.0 and September collection releases compared to Ansible 2.9
+### Documentation Fixes:
+- na_elementsw_drive: na_elementsw_drive was documented as na_element_drive
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/.plugin-cache.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/.plugin-cache.yaml
new file mode 100644
index 00000000..a2b3e374
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/.plugin-cache.yaml
@@ -0,0 +1,135 @@
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ na_elementsw_access_group:
+ description: NetApp Element Software Manage Access Groups
+ name: na_elementsw_access_group
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_access_group_volumes:
+ description: NetApp Element Software Add/Remove Volumes to/from Access Group
+ name: na_elementsw_access_group_volumes
+ namespace: ''
+ version_added: 20.1.0
+ na_elementsw_account:
+ description: NetApp Element Software Manage Accounts
+ name: na_elementsw_account
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_admin_users:
+ description: NetApp Element Software Manage Admin Users
+ name: na_elementsw_admin_users
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_backup:
+ description: NetApp Element Software Create Backups
+ name: na_elementsw_backup
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_check_connections:
+ description: NetApp Element Software Check connectivity to MVIP and SVIP.
+ name: na_elementsw_check_connections
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_cluster:
+ description: NetApp Element Software Create Cluster
+ name: na_elementsw_cluster
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_cluster_config:
+ description: Configure Element SW Cluster
+ name: na_elementsw_cluster_config
+ namespace: ''
+ version_added: 2.8.0
+ na_elementsw_cluster_pair:
+ description: NetApp Element Software Manage Cluster Pair
+ name: na_elementsw_cluster_pair
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_cluster_snmp:
+ description: Configure Element SW Cluster SNMP
+ name: na_elementsw_cluster_snmp
+ namespace: ''
+ version_added: 2.8.0
+ na_elementsw_drive:
+ description: NetApp Element Software Manage Node Drives
+ name: na_elementsw_drive
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_info:
+ description: NetApp Element Software Info
+ name: na_elementsw_info
+ namespace: ''
+ version_added: 20.10.0
+ na_elementsw_initiators:
+ description: Manage Element SW initiators
+ name: na_elementsw_initiators
+ namespace: ''
+ version_added: 2.8.0
+ na_elementsw_ldap:
+ description: NetApp Element Software Manage ldap admin users
+ name: na_elementsw_ldap
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_network_interfaces:
+ description: NetApp Element Software Configure Node Network Interfaces
+ name: na_elementsw_network_interfaces
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_node:
+ description: NetApp Element Software Node Operation
+ name: na_elementsw_node
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_qos_policy:
+ description: NetApp Element Software create/modify/rename/delete QOS Policy
+ name: na_elementsw_qos_policy
+ namespace: ''
+ version_added: 20.9.0
+ na_elementsw_snapshot:
+ description: NetApp Element Software Manage Snapshots
+ name: na_elementsw_snapshot
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_snapshot_restore:
+ description: NetApp Element Software Restore Snapshot
+ name: na_elementsw_snapshot_restore
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_snapshot_schedule:
+ description: NetApp Element Software Snapshot Schedules
+ name: na_elementsw_snapshot_schedule
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_vlan:
+ description: NetApp Element Software Manage VLAN
+ name: na_elementsw_vlan
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_volume:
+ description: NetApp Element Software Manage Volumes
+ name: na_elementsw_volume
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_volume_clone:
+ description: NetApp Element Software Create Volume Clone
+ name: na_elementsw_volume_clone
+ namespace: ''
+ version_added: 2.7.0
+ na_elementsw_volume_pair:
+ description: NetApp Element Software Volume Pair
+ name: na_elementsw_volume_pair
+ namespace: ''
+ version_added: 2.7.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 20.10.0
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/changelog.yaml
new file mode 100644
index 00000000..8b0b6581
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/changelog.yaml
@@ -0,0 +1,179 @@
+ancestor: null
+releases:
+ 19.10.0:
+ changes:
+ minor_changes:
+ - refactor existing modules as a collection
+ fragments:
+ - 2019.10.0.yaml
+ release_date: '2019-11-14'
+ 2.7.0:
+ modules:
+ - description: NetApp Element Software Manage Access Groups
+ name: na_elementsw_access_group
+ namespace: ''
+ - description: NetApp Element Software Manage Accounts
+ name: na_elementsw_account
+ namespace: ''
+ - description: NetApp Element Software Manage Admin Users
+ name: na_elementsw_admin_users
+ namespace: ''
+ - description: NetApp Element Software Create Backups
+ name: na_elementsw_backup
+ namespace: ''
+ - description: NetApp Element Software Check connectivity to MVIP and SVIP.
+ name: na_elementsw_check_connections
+ namespace: ''
+ - description: NetApp Element Software Create Cluster
+ name: na_elementsw_cluster
+ namespace: ''
+ - description: NetApp Element Software Manage Cluster Pair
+ name: na_elementsw_cluster_pair
+ namespace: ''
+ - description: NetApp Element Software Manage Node Drives
+ name: na_elementsw_drive
+ namespace: ''
+ - description: NetApp Element Software Manage ldap admin users
+ name: na_elementsw_ldap
+ namespace: ''
+ - description: NetApp Element Software Configure Node Network Interfaces
+ name: na_elementsw_network_interfaces
+ namespace: ''
+ - description: NetApp Element Software Node Operation
+ name: na_elementsw_node
+ namespace: ''
+ - description: NetApp Element Software Manage Snapshots
+ name: na_elementsw_snapshot
+ namespace: ''
+ - description: NetApp Element Software Restore Snapshot
+ name: na_elementsw_snapshot_restore
+ namespace: ''
+ - description: NetApp Element Software Snapshot Schedules
+ name: na_elementsw_snapshot_schedule
+ namespace: ''
+ - description: NetApp Element Software Manage VLAN
+ name: na_elementsw_vlan
+ namespace: ''
+ - description: NetApp Element Software Manage Volumes
+ name: na_elementsw_volume
+ namespace: ''
+ - description: NetApp Element Software Create Volume Clone
+ name: na_elementsw_volume_clone
+ namespace: ''
+ - description: NetApp Element Software Volume Pair
+ name: na_elementsw_volume_pair
+ namespace: ''
+ release_date: '2018-09-21'
+ 2.8.0:
+ modules:
+ - description: Configure Element SW Cluster
+ name: na_elementsw_cluster_config
+ namespace: ''
+ - description: Configure Element SW Cluster SNMP
+ name: na_elementsw_cluster_snmp
+ namespace: ''
+ - description: Manage Element SW initiators
+ name: na_elementsw_initiators
+ namespace: ''
+ release_date: '2019-04-11'
+ 20.1.0:
+ modules:
+ - description: NetApp Element Software Add/Remove Volumes to/from Access Group
+ name: na_elementsw_access_group_volumes
+ namespace: ''
+ release_date: '2020-01-08'
+ 20.10.0:
+ changes:
+ minor_changes:
+ - na_elementsw_cluster - add new options ``encryption``, ``order_number``, and
+ ``serial_number``.
+ - na_elementsw_network_interfaces - make all options not required, so that only
+ bond_1g can be set for example.
+ - na_elementsw_network_interfaces - restructure options into 2 dictionaries
+ ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow
+ all older options.
+ fragments:
+ - DEVOPS-3117.yaml
+ - DEVOPS-3196.yaml
+ - DEVOPS-3235.yaml
+ modules:
+ - description: NetApp Element Software Info
+ name: na_elementsw_info
+ namespace: ''
+ release_date: '2020-10-08'
+ 20.2.0:
+ changes:
+ bugfixes:
+ - galaxy.yml - fix path to github repository.
+ - netapp.py - report error in case of connection error rather than raising a
+ generic exception by default.
+ fragments:
+ - 20.2.0.yaml
+ release_date: '2020-02-05'
+ 20.6.0:
+ changes:
+ bugfixes:
+ - galaxy.yml - fix repository and homepage links.
+ fragments:
+ - 20.6.0.yaml
+ release_date: '2020-06-03'
+ 20.8.0:
+ changes:
+ bugfixes:
+ - na_elementsw_access_group - fix check_mode so that no action is taken.
+ - na_elementsw_admin_users - fix check_mode so that no action is taken.
+ - na_elementsw_cluster - create cluster if it does not exist. Do not expect
+ MVIP or SVIP to exist before create.
+ - na_elementsw_cluster_snmp - double exception because of AttributeError.
+ - na_elementsw_drive - node_id or drive_id were not handled properly when using
+ numeric ids.
+ - na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups
+ was ignored and redundant.
+ - na_elementsw_ldap - double exception because of AttributeError.
+ - na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency),
+ remove default values and fix documentation.
+ - na_elementsw_vlan - AttributeError if VLAN already exists.
+ - na_elementsw_vlan - change in attributes was ignored.
+ - na_elementsw_vlan - fix check_mode so that no action is taken.
+ - na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid
+ python identifier - renamed to enable512emulation.
+ - na_elementsw_volume - double exception because of AttributeError.
+ minor_changes:
+ - add "required:true" where missing.
+ - add "type:str" (or int, dict) where missing in documentation section.
+ - na_elementsw_drive - add all drives in a cluster, allow for a list of nodes
+ or a list of drives.
+ - remove "required:true" for state and use present as default.
+ - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same
+ thing for 2.8 and 2.9.
+ fragments:
+ - 20.8.0.yaml
+ release_date: '2020-08-05'
+ 20.9.0:
+ changes:
+ bugfixes:
+ - na_elementsw_node - fix check_mode so that no action is taken.
+ minor_changes:
+ - na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes.
+ - na_elementsw_node - ``preset_only`` to only set the cluster name before creating
+ a cluster with na_elementsw_cluster.
+ - na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or
+ ID.
+ fragments:
+ - 20.9.0.yaml
+ modules:
+ - description: NetApp Element Software create/modify/rename/delete QOS Policy
+ name: na_elementsw_qos_policy
+ namespace: ''
+ release_date: '2020-09-02'
+ 20.9.1:
+ changes:
+ bugfixes:
+ - na_elementsw_node - improve error reporting when cluster name cannot be set
+ because node is already active.
+ - na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo
+ have been added back
+ fragments:
+ - DEVOPS-3174.yaml
+ - DEVOPS-3188.yaml
+ release_date: '2020-09-08'
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/config.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/config.yaml
new file mode 100644
index 00000000..2d637df5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/config.yaml
@@ -0,0 +1,32 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: NetApp ElementSW Collection
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml
new file mode 100644
index 00000000..832b5f56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - galaxy.yml - fix path to github repository.
+ - netapp.py - report error in case of connection error rather than raising a generic exception by default.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml
new file mode 100644
index 00000000..fcd0d11e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - galaxy.yml - fix repository and homepage links.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml
new file mode 100644
index 00000000..5c959531
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml
@@ -0,0 +1,21 @@
+minor_changes:
+ - na_elementsw_drive - add all drives in a cluster, allow for a list of nodes or a list of drives.
+ - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+ - add "type:str" (or int, dict) where missing in documentation section.
+ - add "required:true" where missing.
+ - remove "required:true" for state and use present as default.
+
+bugfixes:
+ - na_elementsw_access_group - fix check_mode so that no action is taken.
+ - na_elementsw_admin_users - fix check_mode so that no action is taken.
+ - na_elementsw_cluster - create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+ - na_elementsw_cluster_snmp - double exception because of AttributeError.
+ - na_elementsw_drive - node_id or drive_id were not handled properly when using numeric ids.
+ - na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+ - na_elementsw_ldap - double exception because of AttributeError.
+ - na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), remove default values and fix documentation.
+ - na_elementsw_vlan - AttributeError if VLAN already exists.
+ - na_elementsw_vlan - fix check_mode so that no action is taken.
+ - na_elementsw_vlan - change in attributes was ignored.
+ - na_elementsw_volume - double exception because of AttributeError.
+ - na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml
new file mode 100644
index 00000000..a406c9c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml
@@ -0,0 +1,7 @@
+minor_changes:
+ - na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes.
+ - na_elementsw_node - ``preset_only`` to only set the cluster name before creating a cluster with na_elementsw_cluster.
+ - na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or ID.
+
+bugfixes:
+ - na_elementsw_node - fix check_mode so that no action is taken.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml
new file mode 100644
index 00000000..5723daa1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - refactor existing modules as a collection
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml
new file mode 100644
index 00000000..23a6cafa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_cluster - add new options ``encryption``, ``order_number``, and ``serial_number``.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml
new file mode 100644
index 00000000..01e75471
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_elementsw_node - improve error reporting when cluster name cannot be set because node is already active.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml
new file mode 100644
index 00000000..ad5d8bee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml
new file mode 100644
index 00000000..21a70b02
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - na_elementsw_network_interfaces - make all options optional, so that, for example, only bond_1g can be set.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml
new file mode 100644
index 00000000..8a2f82f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_network_interfaces - restructure options into 2 dictionaries ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow all older options.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml
new file mode 100644
index 00000000..729e6d06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_snapshot_schedule - Add ``retention`` in examples.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml
new file mode 100644
index 00000000..b87e308d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_elementsw_drive - Object of type 'dict_values' is not JSON serializable.
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py
new file mode 100644
index 00000000..229d03f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+
+ # Documentation fragment for SolidFire
+ SOLIDFIRE = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the SolidFire cluster.
+ - For na_elementsw_cluster, the Management IP (MIP) or hostname of the node to initiate the cluster creation from.
+ type: str
+ username:
+ required: true
+ description:
+    - Please ensure that the user has adequate permissions. For more information, please read the official documentation
+ U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
+ aliases: ['user']
+ type: str
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+ type: str
+
+requirements:
+ - The modules were developed with SolidFire 10.1
+ - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
+
+notes:
+ - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
+
+'''
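
Modules in this collection do not repeat the hostname/username/password options defined above; they pull the fragment in through extends_documentation_fragment. The snippet below is a minimal illustrative sketch of what that looks like in a module's DOCUMENTATION block; the module name na_elementsw_example is made up and not part of the collection.

    # Sketch only: reusing the SOLIDFIRE doc fragment from a hypothetical module.
    DOCUMENTATION = '''
    module: na_elementsw_example
    short_description: illustrative module, not shipped with this collection
    extends_documentation_fragment:
      - netapp.elementsw.netapp.solidfire
    options:
      name:
        description:
        - Name of the object managed by this illustrative module.
        required: true
        type: str
    '''
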
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py
new file mode 100644
index 00000000..050491ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py
@@ -0,0 +1,98 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# Copyright: (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''
+Common methods and constants
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+try:
+ from solidfire.factory import ElementFactory
+ import solidfire.common
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+COLLECTION_VERSION = "20.11.0"
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def ontap_sf_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True)
+ )
+
+
+def create_sf_connection(module, hostname=None, port=None, raise_on_connection_error=False, timeout=None):
+ if hostname is None:
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ options = dict()
+ if port is not None:
+ options['port'] = port
+ if timeout is not None:
+ options['timeout'] = timeout
+
+ if not HAS_SF_SDK:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+ try:
+ return_val = ElementFactory.create(hostname, username, password, **options)
+ except (solidfire.common.ApiConnectionError, solidfire.common.ApiServerError) as exc:
+ if raise_on_connection_error:
+ raise exc
+ module.fail_json(msg=repr(exc))
+ except Exception as exc:
+ raise Exception("Unable to create SF connection: %s" % repr(exc))
+ return return_val
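
Every module in this collection consumes the helpers above in the same way: build the shared argument spec, bail out if the SDK is missing, then create the connection from module.params. The skeleton below is a hedged sketch of that pattern, not a module shipped with the collection; the module name and the extra 'name' option are made up.

    #!/usr/bin/python
    # Illustrative skeleton only: shows how ontap_sf_host_argument_spec() and
    # create_sf_connection() are typically wired into a module.
    from ansible.module_utils.basic import AnsibleModule
    import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils


    def main():
        # shared hostname/username/password spec, extended with module-specific options
        argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        argument_spec.update(dict(
            name=dict(required=True, type='str'),
        ))
        module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

        if not netapp_utils.has_sf_sdk():
            module.fail_json(msg="Unable to import the SolidFire Python SDK")

        # reads hostname/username/password from module.params and returns an Element connection
        sfe = netapp_utils.create_sf_connection(module=module)
        module.exit_json(changed=False, connected=sfe is not None)


    if __name__ == '__main__':
        main()
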
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py
new file mode 100644
index 00000000..2d8b92cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py
@@ -0,0 +1,206 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Copyright: (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+
+HAS_SF_SDK = False
+try:
+ import solidfire.common
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+class NaElementSWModule(object):
+ ''' Support class for common or shared functions '''
+ def __init__(self, elem):
+ self.elem_connect = elem
+ self.parameters = dict()
+
+ def get_volume(self, volume_id):
+ """
+ Return volume details if volume exists for given volume_id
+
+ :param volume_id: volume ID
+ :type volume_id: int
+ :return: Volume dict if found, None if not found
+ :rtype: dict
+ """
+ volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id])
+ for volume in volume_list.volumes:
+ if volume.volume_id == volume_id:
+ if str(volume.delete_time) == "":
+ return volume
+ return None
+
+ def get_volume_id(self, vol_name, account_id):
+ """
+ Return volume id from the given (valid) account_id if found
+ Return None if not found
+
+ :param vol_name: Name of the volume
+ :type vol_name: str
+ :param account_id: Account ID
+ :type account_id: int
+
+ :return: Volume ID of the first matching volume if found. None if not found.
+ :rtype: int
+ """
+ volume_list = self.elem_connect.list_volumes_for_account(account_id=account_id)
+ for volume in volume_list.volumes:
+ if volume.name == vol_name:
+ # return volume_id
+ if str(volume.delete_time) == "":
+ return volume.volume_id
+ return None
+
+ def volume_id_exists(self, volume_id):
+ """
+ Return volume_id if volume exists for given volume_id
+
+ :param volume_id: volume ID
+ :type volume_id: int
+ :return: Volume ID if found, None if not found
+ :rtype: int
+ """
+ volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id])
+ for volume in volume_list.volumes:
+ if volume.volume_id == volume_id:
+ if str(volume.delete_time) == "":
+ return volume.volume_id
+ return None
+
+ def volume_exists(self, volume, account_id):
+ """
+ Return volume_id if exists, None if not found
+
+ :param volume: Volume ID or Name
+ :type volume: str
+ :param account_id: Account ID (valid)
+ :type account_id: int
+ :return: Volume ID if found, None if not found
+ """
+ # If volume is an integer, get_by_id
+ if str(volume).isdigit():
+ volume_id = int(volume)
+ try:
+ if self.volume_id_exists(volume_id):
+ return volume_id
+ except solidfire.common.ApiServerError:
+ # don't fail, continue and try get_by_name
+ pass
+ # get volume by name
+ volume_id = self.get_volume_id(volume, account_id)
+ return volume_id
+
+ def get_snapshot(self, snapshot_id, volume_id):
+ """
+ Return snapshot details if found
+
+ :param snapshot_id: Snapshot ID or Name
+ :type snapshot_id: str
+        :param volume_id: Volume ID (valid)
+ :type volume_id: int
+ :return: Snapshot dict if found, None if not found
+ :rtype: dict
+ """
+ # mandate src_volume_id although not needed by sdk
+ snapshot_list = self.elem_connect.list_snapshots(
+ volume_id=volume_id)
+ for snapshot in snapshot_list.snapshots:
+ # if actual id is provided
+ if str(snapshot_id).isdigit() and snapshot.snapshot_id == int(snapshot_id):
+ return snapshot
+ # if snapshot name is provided
+ elif snapshot.name == snapshot_id:
+ return snapshot
+ return None
+
+ @staticmethod
+ def map_qos_obj_to_dict(qos_obj):
+        ''' Take a QOS object and return it as a dict, normalizing the key names
+            Interestingly, the APIs use different keys for create and get
+ '''
+ mappings = [
+ ('burst_iops', 'burstIOPS'),
+ ('min_iops', 'minIOPS'),
+ ('max_iops', 'maxIOPS'),
+ ]
+ qos_dict = vars(qos_obj)
+ # Align names to create API and module interface
+ for read, send in mappings:
+ if read in qos_dict:
+ qos_dict[send] = qos_dict.pop(read)
+ return qos_dict
+
+ def get_qos_policy(self, name):
+ """
+ Get QOS Policy
+ :description: Get QOS Policy object for a given name
+ :return: object, error
+ Policy object converted to dict if found, else None
+ Error text if error, else None
+ :rtype: dict/None, str/None
+ """
+ try:
+ qos_policy_list_obj = self.elem_connect.list_qos_policies()
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ error = "Error getting list of qos policies: %s" % to_native(exc)
+ return None, error
+
+ policy_dict = dict()
+ if hasattr(qos_policy_list_obj, 'qos_policies'):
+ for policy in qos_policy_list_obj.qos_policies:
+ # Check and get policy object for a given name
+ if str(policy.qos_policy_id) == name:
+ policy_dict = vars(policy)
+ elif policy.name == name:
+ policy_dict = vars(policy)
+ if 'qos' in policy_dict:
+ policy_dict['qos'] = self.map_qos_obj_to_dict(policy_dict['qos'])
+
+ return policy_dict if policy_dict else None, None
+
+ def account_exists(self, account):
+ """
+ Return account_id if account exists for given account id or name
+ Raises an exception if account does not exist
+
+ :param account: Account ID or Name
+ :type account: str
+        :return: Account ID if found; raises an exception if the account does not exist
+ """
+ # If account is an integer, get_by_id
+ if account.isdigit():
+ account_id = int(account)
+ try:
+ result = self.elem_connect.get_account_by_id(account_id=account_id)
+ if result.account.account_id == account_id:
+ return account_id
+ except solidfire.common.ApiServerError:
+ # don't fail, continue and try get_by_name
+ pass
+ # get account by name, the method returns an Exception if account doesn't exist
+ result = self.elem_connect.get_account_by_name(username=account)
+ return result.account.account_id
+
+ def set_element_attributes(self, source):
+ """
+ Return telemetry attributes for the current execution
+
+ :param source: name of the module
+ :type source: str
+ :return: a dict containing telemetry attributes
+ """
+ attributes = {}
+ attributes['config-mgmt'] = 'ansible'
+ attributes['event-source'] = source
+ return attributes
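
The helper class above is instantiated by each module right after the connection is created, mainly to resolve names or ids and to attach telemetry attributes. The function below is a small hedged sketch of that usage; 'module' is an AnsibleModule, 'sfe' a connection from create_sf_connection(), and the names are illustrative.

    # Sketch: typical use of NaElementSWModule by a module in this collection.
    from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule


    def resolve_volume(module, sfe, account, volume):
        helper = NaElementSWModule(sfe)
        # telemetry attributes attached to objects created or modified by the module
        attributes = helper.set_element_attributes(source='na_elementsw_example')
        # account_exists accepts a numeric id or a name, and raises if the account is unknown
        account_id = helper.account_exists(account)
        # volume_exists accepts a numeric id or a name, and returns None when not found
        volume_id = helper.volume_exists(volume, account_id)
        if volume_id is None:
            module.fail_json(msg='Specified volume %s does not exist' % volume)
        return volume_id, attributes
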
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py
new file mode 100644
index 00000000..c2b02d3d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py
@@ -0,0 +1,225 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def cmp(a, b):
+ """
+    Python 3 does not have a cmp function; this provides an equivalent.
+    :param a: first object to check
+    :param b: second object to check
+    :return: -1 if a < b (or if a is None), 0 if equal, 1 if a > b
+ """
+ # convert to lower case for string comparison.
+ if a is None:
+ return -1
+ if type(a) is str and type(b) is str:
+ a = a.lower()
+ b = b.lower()
+ # if list has string element, convert string to lower case.
+ if type(a) is list and type(b) is list:
+ a = [x.lower() if type(x) is str else x for x in a]
+ b = [x.lower() if type(x) is str else x for x in b]
+ a.sort()
+ b.sort()
+ return (a > b) - (a < b)
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
+ def __init__(self):
+ self.log = list()
+ self.changed = False
+        self.parameters = {'name': 'not initialized'}
+ # self.debug = list()
+
+ def set_parameters(self, ansible_params):
+ self.parameters = dict()
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def get_cd_action(self, current, desired):
+        ''' takes the current state (an object, or None if absent) and the
+            desired parameters (including 'state'), and returns an action:
+            create, delete, None
+            eg:
+            some_object = self.get_object(source)
+            action = self.get_cd_action(current=some_object,
+                                        desired=self.parameters)
+            # action is 'create', 'delete', or None
+ '''
+ if 'state' in desired:
+ desired_state = desired['state']
+ else:
+ desired_state = 'present'
+
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ if current is not None:
+ return 'delete'
+ return 'create'
+
+ def compare_and_update_values(self, current, desired, keys_to_compare):
+ updated_values = dict()
+ is_changed = False
+ for key in keys_to_compare:
+ if key in current:
+ if key in desired and desired[key] is not None:
+ if current[key] != desired[key]:
+ updated_values[key] = desired[key]
+ is_changed = True
+ else:
+ updated_values[key] = current[key]
+ else:
+ updated_values[key] = current[key]
+
+ return updated_values, is_changed
+
+ @staticmethod
+ def check_keys(current, desired):
+ ''' TODO: raise an error if keys do not match
+ with the exception of:
+ new_name, state in desired
+ '''
+ pass
+
+ @staticmethod
+ def compare_lists(current, desired, get_list_diff):
+ ''' compares two lists and return a list of elements that are either the desired elements or elements that are
+ modified from the current state depending on the get_list_diff flag
+ :param: current: current item attribute in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: list of attributes to be modified
+ :rtype: list
+ '''
+        desired_diff_list = [item for item in desired if item not in current]  # what is in desired but not in current
+        current_diff_list = [item for item in current if item not in desired]  # what is in current but not in desired
+
+ if desired_diff_list or current_diff_list:
+ # there are changes
+ if get_list_diff:
+ return desired_diff_list
+ else:
+ return desired
+ else:
+ return []
+
+ def get_modified_attributes(self, current, desired, get_list_diff=False, additional_keys=False):
+ ''' takes two dicts of attributes and return a dict of attributes that are
+ not in the current state
+ It is expected that all attributes of interest are listed in current and
+ desired.
+        The same assumption holds true for any nested dictionary.
+        TODO: This is actually not true for the ElementSW 'attributes' dictionary.
+ Practically it means you cannot add or remove a key in a modify.
+ :param: current: current attributes in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: dict of attributes to be modified
+ :rtype: dict
+
+ NOTE: depending on the attribute, the caller may need to do a modify or a
+ different operation (eg move volume if the modified attribute is an
+ aggregate name)
+ '''
+ # uncomment these 2 lines if needed
+ # self.log.append('current: %s' % repr(current))
+ # self.log.append('desired: %s' % repr(desired))
+ # if the object does not exist, we can't modify it
+ modified = dict()
+ if current is None:
+ return modified
+
+ # error out if keys do not match
+ self.check_keys(current, desired)
+
+ # collect changed attributes
+ for key, value in current.items():
+ if key in desired and desired[key] is not None:
+ if type(value) is list:
+ modified_list = self.compare_lists(value, desired[key], get_list_diff) # get modified list from current and desired
+ if modified_list:
+ modified[key] = modified_list
+ elif type(value) is dict:
+ modified_dict = self.get_modified_attributes(value, desired[key], get_list_diff, additional_keys=True)
+ if modified_dict:
+ modified[key] = modified_dict
+ elif cmp(value, desired[key]) != 0:
+ modified[key] = desired[key]
+ if additional_keys:
+ for key, value in desired.items():
+ if key not in current:
+ modified[key] = desired[key]
+ if modified:
+ self.changed = True
+ # Uncomment this line if needed
+ # self.log.append('modified: %s' % repr(modified))
+ return modified
+
+ def is_rename_action(self, source, target):
+ ''' takes a source and target object, and returns True
+ if a rename is required
+ eg:
+ source = self.get_object(source_name)
+ target = self.get_object(target_name)
+ action = is_rename_action(source, target)
+ :return: None for error, True for rename action, False otherwise
+ '''
+ if source is None and target is None:
+ # error, do nothing
+            # cannot rename a non-existent resource
+ # alternatively we could create B
+ return None
+ if source is not None and target is not None:
+ # error, do nothing
+ # idempotency (or) new_name_is_already_in_use
+ # alternatively we could delete B and rename A to B
+ return False
+ if source is None and target is not None:
+ # do nothing, maybe the rename was already done
+ return False
+ # source is not None and target is None:
+ # rename is in order
+ self.changed = True
+ return True
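
NetAppModule is pure state bookkeeping and needs no cluster, so its behaviour can be shown directly. The short example below is a hedged sketch (the object shapes are made up) of how get_cd_action and get_modified_attributes turn a current/desired pair into an action and a set of changes.

    # Runnable sketch of the state-comparison helpers (no cluster required).
    from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule

    na_helper = NetAppModule()
    # None-valued options are dropped by set_parameters
    desired = na_helper.set_parameters({'state': 'present', 'name': 'grp1', 'volumes': [7, 8, 9], 'unused': None})

    current = None                                     # resource not found on the cluster
    print(na_helper.get_cd_action(current, desired))   # -> 'create'

    current = {'name': 'grp1', 'volumes': [7, 8]}      # resource exists but differs
    print(na_helper.get_cd_action(current, desired))   # -> None (neither create nor delete)
    print(na_helper.get_modified_attributes(current, desired))                      # -> {'volumes': [7, 8, 9]}
    print(na_helper.get_modified_attributes(current, desired, get_list_diff=True))  # -> {'volumes': [9]}
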
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py
new file mode 100644
index 00000000..467ca415
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py
@@ -0,0 +1,397 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Access Group Manager
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_access_group
+
+short_description: NetApp Element Software Manage Access Groups
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update access groups on Element Software Cluster.
+
+options:
+
+ state:
+ description:
+ - Whether the specified access group should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ from_name:
+ description:
+ - ID or Name of the access group to rename.
+ - Required to create a new access group called 'name' by renaming 'from_name'.
+ version_added: 2.8.0
+ type: str
+
+ name:
+ description:
+ - Name for the access group for create, modify and delete operations.
+ required: True
+ aliases:
+ - src_access_group_id
+ type: str
+
+ initiators:
+ description:
+ - List of initiators to include in the access group. If unspecified, the access group will start out without configured initiators.
+ type: list
+ elements: str
+
+ volumes:
+ description:
+ - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
+ - It accepts either volume_name or volume_id
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ - It accepts either account_name or account_id
+    - If account_id is numeric, it is treated as an account_id.
+    - If account_id is not numeric, it is treated as an account_name.
+ version_added: 2.8.0
+ type: str
+
+ virtual_network_id:
+ description:
+ - The ID of the Element SW Software Cluster Virtual Network to associate the access group with.
+ type: int
+
+ virtual_network_tags:
+ description:
+ - The tags of VLAN Virtual Network Tag to associate the access group with.
+ type: list
+ elements: str
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+ type: dict
+
+'''
+
+EXAMPLES = """
+ - name: Create Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleAccessGroup
+ volumes: [7,8]
+ account_id: 1
+
+ - name: Modify Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleAccessGroup-Renamed
+ account_id: 1
+ attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
+
+ - name: Rename Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ from_name: AnsibleAccessGroup
+ name: AnsibleAccessGroup-Renamed
+
+ - name: Delete Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: 1
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWAccessGroup(object):
+ """
+ Element Software Volume Access Group
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ from_name=dict(required=False, type='str'),
+ name=dict(required=True, aliases=["src_access_group_id"], type='str'),
+ initiators=dict(required=False, type='list', elements='str'),
+ volumes=dict(required=False, type='list', elements='str'),
+ account_id=dict(required=False, type='str'),
+ virtual_network_id=dict(required=False, type='int'),
+ virtual_network_tags=dict(required=False, type='list', elements='str'),
+ attributes=dict(required=False, type='dict'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['account_id'])
+ ],
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ # Set up state variables
+ self.state = input_params['state']
+ self.from_name = input_params['from_name']
+ self.access_group_name = input_params['name']
+ self.initiators = input_params['initiators']
+ self.volumes = input_params['volumes']
+ self.account_id = input_params['account_id']
+ self.virtual_network_id = input_params['virtual_network_id']
+ self.virtual_network_tags = input_params['virtual_network_tags']
+ self.attributes = input_params['attributes']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group')
+
+ def get_access_group(self, name):
+ """
+ Get Access Group
+ :description: Get Access Group object for a given name
+
+ :return: object (Group object)
+ :rtype: object (Group object)
+ """
+ access_groups_list = self.sfe.list_volume_access_groups()
+ group_obj = None
+
+ for group in access_groups_list.volume_access_groups:
+ # Check and get access_group object for a given name
+ if str(group.volume_access_group_id) == name:
+ group_obj = group
+ elif group.name == name:
+ group_obj = group
+
+ return group_obj
+
+ def get_account_id(self):
+ # Validate account id
+ # Return account_id if found, None otherwise
+ try:
+ account_id = self.elementsw_helper.account_exists(self.account_id)
+ return account_id
+ except solidfire.common.ApiServerError:
+ return None
+
+ def get_volume_ids(self):
+ # Validate volume_ids
+ # Return volume ids if found, fail if not found
+ volume_ids = []
+ for volume in self.volumes:
+ volume_id = self.elementsw_helper.volume_exists(volume, self.account_id)
+ if volume_id:
+ volume_ids.append(volume_id)
+ else:
+ self.module.fail_json(msg='Specified volume %s does not exist' % volume)
+ return volume_ids
+
+ def create_access_group(self):
+ """
+ Create the Access Group
+ """
+ try:
+ self.sfe.create_volume_access_group(name=self.access_group_name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error creating volume access group %s: %s" %
+ (self.access_group_name, to_native(e)), exception=traceback.format_exc())
+
+ def delete_access_group(self):
+ """
+ Delete the Access Group
+ """
+ try:
+ self.sfe.delete_volume_access_group(volume_access_group_id=self.group_id)
+
+ except Exception as e:
+ self.module.fail_json(msg="Error deleting volume access group %s: %s" %
+ (self.access_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_access_group(self):
+ """
+ Update the Access Group if the access_group already exists
+ """
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.access_group_name, to_native(e)), exception=traceback.format_exc())
+
+ def rename_access_group(self):
+ """
+ Rename the Access Group to the new name
+ """
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.from_group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ name=self.access_group_name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.from_name, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the access group operation on the Element Software Cluster
+ """
+ changed = False
+ action = None
+
+ input_account_id = self.account_id
+ if self.account_id is not None:
+ self.account_id = self.get_account_id()
+ if self.state == 'present' and self.volumes is not None:
+ if self.account_id:
+ self.volumes = self.get_volume_ids()
+ else:
+ self.module.fail_json(msg='Error: Specified account id "%s" does not exist.' % str(input_account_id))
+
+ group_detail = self.get_access_group(self.access_group_name)
+
+ if group_detail is not None:
+ # If access group found
+ self.group_id = group_detail.volume_access_group_id
+
+ if self.state == "absent":
+ action = 'delete'
+ changed = True
+ else:
+                # If state is present, check whether any parameter of the existing group needs modification.
+ if self.volumes is not None and len(self.volumes) > 0:
+ # Compare the volume list
+ if not group_detail.volumes:
+ # If access group does not have any volume attached
+ action = 'update'
+ changed = True
+ else:
+ for volumeID in group_detail.volumes:
+ if volumeID not in self.volumes:
+ action = 'update'
+ changed = True
+ break
+
+ elif self.initiators is not None and group_detail.initiators != self.initiators:
+ action = 'update'
+ changed = True
+
+ elif self.virtual_network_id is not None or self.virtual_network_tags is not None:
+ action = 'update'
+ changed = True
+
+ else:
+ # access_group does not exist
+ if self.state == "present" and self.from_name is not None:
+ group_detail = self.get_access_group(self.from_name)
+ if group_detail is not None:
+ # If resource pointed by from_name exists, rename the access_group to name
+ self.from_group_id = group_detail.volume_access_group_id
+ action = 'rename'
+ changed = True
+ else:
+ # If resource pointed by from_name does not exists, error out
+ self.module.fail_json(msg="Resource does not exist : %s" % self.from_name)
+ elif self.state == "present":
+ # If from_name is not defined, Create from scratch.
+ action = 'create'
+ changed = True
+
+ if changed and not self.module.check_mode:
+ if action == 'create':
+ self.create_access_group()
+ elif action == 'rename':
+ self.rename_access_group()
+ elif action == 'update':
+ self.update_access_group()
+ elif action == 'delete':
+ self.delete_access_group()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_access_group = ElementSWAccessGroup()
+ na_elementsw_access_group.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py
new file mode 100644
index 00000000..af9053a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Access Group Volumes
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_access_group_volumes
+
+short_description: NetApp Element Software Add/Remove Volumes to/from Access Group
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 20.1.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add or remove volumes to/from access group on Element Software Cluster.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volumes should exist or not for this access group.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ access_group:
+ description:
+ - Name or id for the access group to add volumes to, or remove volumes from
+ required: true
+ type: str
+
+ volumes:
+ description:
+ - List of volumes to add/remove from/to the access group.
+ - It accepts either volume_name or volume_id
+ required: True
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ - It accepts either account_name or account_id
+    - If account_id is numeric, the account is looked up by account_id first, then by account_name.
+    - If account_id is not numeric, the account is looked up by account_name.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Add Volumes to Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ access_group: AnsibleAccessGroup
+ volumes: ['vol7','vol8','vol9']
+ account_id: '1'
+
+ - name: Remove Volumes from Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ access_group: AnsibleAccessGroup
+ volumes: ['vol7','vol9']
+ account_id: '1'
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWAccessGroupVolumes(object):
+ """
+ Element Software Access Group Volumes
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ access_group=dict(required=True, type='str'),
+ volumes=dict(required=True, type='list', elements='str'),
+ account_id=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ # Set up state variables
+ self.state = input_params['state']
+ self.access_group_name = input_params['access_group']
+ self.volumes = input_params['volumes']
+ self.account_id = input_params['account_id']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group')
+
+ def get_access_group(self, name):
+ """
+ Get Access Group
+ :description: Get Access Group object for a given name
+
+ :return: object (Group object)
+ :rtype: object (Group object)
+ """
+ access_groups_list = self.sfe.list_volume_access_groups()
+ group_obj = None
+
+ for group in access_groups_list.volume_access_groups:
+ # Check and get access_group object for a given name
+ if str(group.volume_access_group_id) == name:
+ group_obj = group
+ elif group.name == name:
+ group_obj = group
+
+ return group_obj
+
+ def get_account_id(self):
+ # Validate account id
+ # Return account_id if found, None otherwise
+ try:
+ account_id = self.elementsw_helper.account_exists(self.account_id)
+ return account_id
+ except solidfire.common.ApiServerError:
+ return None
+
+ def get_volume_ids(self):
+ # Validate volume_ids
+ # Return volume ids if found, fail if not found
+ volume_ids = []
+ for volume in self.volumes:
+ volume_id = self.elementsw_helper.volume_exists(volume, self.account_id)
+ if volume_id:
+ volume_ids.append(volume_id)
+ else:
+ self.module.fail_json(msg='Error: Specified volume %s does not exist' % volume)
+ return volume_ids
+
+ def update_access_group(self, volumes):
+ """
+ Update the Access Group if the access_group already exists
+ """
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.group_id,
+ volumes=volumes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.access_group_name, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the volume add/remove operations for the access group on the Element Software Cluster
+ """
+ changed = False
+ input_account_id = self.account_id
+
+ if self.account_id is not None:
+ self.account_id = self.get_account_id()
+ if self.account_id is None:
+ self.module.fail_json(msg='Error: Specified account id "%s" does not exist.' % str(input_account_id))
+
+ # get volume data
+ self.volume_ids = self.get_volume_ids()
+ group_detail = self.get_access_group(self.access_group_name)
+ if group_detail is None:
+ self.module.fail_json(msg='Error: Specified access group "%s" does not exist for account id: %s.' % (self.access_group_name, str(input_account_id)))
+ self.group_id = group_detail.volume_access_group_id
+ volumes = group_detail.volumes
+
+ # compare expected list of volumes to existing one
+ if self.state == "absent":
+ # remove volumes if present in access group
+ volumes = [vol for vol in group_detail.volumes if vol not in self.volume_ids]
+ else:
+ # add volumes if not already present
+ volumes = [vol for vol in self.volume_ids if vol not in group_detail.volumes]
+ volumes.extend(group_detail.volumes)
+
+ # update if there is a change
+ if len(volumes) != len(group_detail.volumes):
+ if not self.module.check_mode:
+ self.update_access_group(volumes)
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_access_group_volumes = ElementSWAccessGroupVolumes()
+ na_elementsw_access_group_volumes.apply()
+
+
+if __name__ == '__main__':
+ main()
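
The add/remove decision in apply() above boils down to two list comprehensions plus a length comparison; the tiny sketch below walks through that arithmetic with made-up volume IDs.

    # Sketch of the list arithmetic in apply() above (volume IDs are made up).
    current_volumes = [7, 8]       # volumes already attached to the access group
    requested_ids = [8, 9]         # self.volume_ids after name/id resolution

    # state: present -> add the missing volumes, keep the existing ones
    present_result = [vol for vol in requested_ids if vol not in current_volumes]
    present_result.extend(current_volumes)                                         # -> [9, 7, 8]

    # state: absent -> drop the requested volumes from the group
    absent_result = [vol for vol in current_volumes if vol not in requested_ids]   # -> [7]

    # modify_volume_access_group is only called when the length differs from the current list
    print(len(present_result) != len(current_volumes))  # True
    print(len(absent_result) != len(current_volumes))   # True
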
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py
new file mode 100644
index 00000000..86275374
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Account Manager
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_account
+
+short_description: NetApp Element Software Manage Accounts
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update accounts on Element SW
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ element_username:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+ aliases:
+ - account_id
+ type: str
+
+ from_name:
+ description:
+ - ID or Name of the account to rename.
+ - Required to create an account called 'element_username' by renaming 'from_name'.
+ version_added: 2.8.0
+ type: str
+
+ initiator_secret:
+ description:
+ - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
+ - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
+ - If not specified, a random secret is created.
+ type: str
+
+ target_secret:
+ description:
+ - CHAP secret to use for the target (mutual CHAP authentication).
+ - Should be 12-16 characters long and impenetrable.
+ - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
+ - If not specified, a random secret is created.
+ type: str
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+ type: dict
+
+ status:
+ description:
+ - Status of the account.
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: Create Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ element_username: TenantA
+
+- name: Modify Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ status: locked
+ element_username: TenantA
+
+- name: Rename Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ element_username: TenantA_Renamed
+ from_name: TenantA
+
+- name: Rename and modify Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ status: locked
+ element_username: TenantA_Renamed
+ from_name: TenantA
+
+- name: Delete Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ element_username: TenantA_Renamed
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWAccount(object):
+ """
+ Element SW Account
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ element_username=dict(required=True, aliases=["account_id"], type='str'),
+ from_name=dict(required=False, default=None),
+ initiator_secret=dict(required=False, type='str', no_log=True),
+ target_secret=dict(required=False, type='str', no_log=True),
+ attributes=dict(required=False, type='dict'),
+ status=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ params = self.module.params
+
+ # set up state variables
+ self.state = params.get('state')
+ self.element_username = params.get('element_username')
+ self.from_name = params.get('from_name')
+ self.initiator_secret = params.get('initiator_secret')
+ self.target_secret = params.get('target_secret')
+ self.attributes = params.get('attributes')
+ self.status = params.get('status')
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the Element SW Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_account'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_account')
+
+ def get_account(self, username):
+ """
+ Get Account
+ :description: Get Account object from account id or name
+
+ :return: Details about the account. None if not found.
+ :rtype: object (Account object)
+ """
+
+ account_list = self.sfe.list_accounts()
+
+ for account in account_list.accounts:
+ # Check and get account object for a given name
+ if str(account.account_id) == username:
+ return account
+ elif account.username == username:
+ return account
+ return None
+
+ def create_account(self):
+ """
+ Create the Account
+ """
+ try:
+ self.sfe.add_account(username=self.element_username,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg='Error creating account %s: %s' % (self.element_username, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_account(self):
+ """
+ Delete the Account
+ """
+ try:
+ self.sfe.remove_account(account_id=self.account_id)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_account(self):
+ """
+ Rename the Account
+ """
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ username=self.element_username,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error renaming account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_account(self):
+ """
+ Update the Account if account already exists
+ """
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the account operation on the Element OS Cluster
+ """
+ changed = False
+ update_account = False
+ account_detail = self.get_account(self.element_username)
+
+ if account_detail is None and self.state == 'present':
+ changed = True
+
+ elif account_detail is not None:
+ # If account found
+ self.account_id = account_detail.account_id
+
+ if self.state == 'absent':
+ changed = True
+ else:
+                # If state is present, check whether any parameter of the existing account needs modification.
+ if account_detail.username is not None and self.element_username is not None and \
+ account_detail.username != self.element_username:
+ update_account = True
+ changed = True
+ elif account_detail.status is not None and self.status is not None \
+ and account_detail.status != self.status:
+ update_account = True
+ changed = True
+
+ elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
+ and account_detail.initiator_secret != self.initiator_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.target_secret is not None and self.target_secret is not None \
+ and account_detail.target_secret != self.target_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.attributes is not None and self.attributes is not None \
+ and account_detail.attributes != self.attributes:
+ update_account = True
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ # Skipping the changes
+ pass
+ else:
+ if self.state == 'present':
+ if update_account:
+ self.update_account()
+ else:
+ if self.from_name is not None:
+ # If from_name is defined
+ account_exists = self.get_account(self.from_name)
+ if account_exists is not None:
+                                # If the resource pointed to by from_name exists, rename the account to element_username
+ self.account_id = account_exists.account_id
+ self.rename_account()
+ else:
+                                # If the resource pointed to by from_name does not exist, error out
+ self.module.fail_json(msg="Resource does not exist : %s" % self.from_name)
+ else:
+ # If from_name is not defined, create from scratch.
+ self.create_account()
+ elif self.state == 'absent':
+ self.delete_account()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_account = ElementSWAccount()
+ na_elementsw_account.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py
new file mode 100644
index 00000000..7ad46648
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_admin_users
+
+short_description: NetApp Element Software Manage Admin Users
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update admin users on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ element_username:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+ type: str
+
+ element_password:
+ description:
+    - The password for the new admin account. Setting the password attribute will always reset your password, even if the password is the same.
+ type: str
+
+ acceptEula:
+ description:
+    - Boolean, true to accept the EULA, false otherwise.
+ type: bool
+
+ access:
+ description:
+ - A list of types the admin has access to
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: Add admin user
+ na_elementsw_admin_users:
+ state: present
+ username: "{{ admin_user_name }}"
+ password: "{{ admin_password }}"
+ hostname: "{{ hostname }}"
+ element_username: carchi8py
+ element_password: carchi8py
+ acceptEula: True
+ access: accounts,drives
+
+ - name: modify admin user
+ na_elementsw_admin_users:
+ state: present
+ username: "{{ admin_user_name }}"
+ password: "{{ admin_password }}"
+ hostname: "{{ hostname }}"
+ element_username: carchi8py
+ element_password: carchi8py12
+ acceptEula: True
+ access: accounts,drives,nodes
+
+ - name: delete admin user
+ na_elementsw_admin_users:
+ state: absent
+ username: "{{ admin_user_name }}"
+ password: "{{ admin_password }}"
+ hostname: "{{ hostname }}"
+ element_username: carchi8py
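+
+  # Additional hedged example (not from the original docs): access may also be
+  # provided as a YAML list, since the option is declared as a list of strings.
+  - name: Add admin user with list-style access
+    na_elementsw_admin_users:
+      state: present
+      username: "{{ admin_user_name }}"
+      password: "{{ admin_password }}"
+      hostname: "{{ hostname }}"
+      element_username: carchi8py2
+      element_password: carchi8py
+      acceptEula: True
+      access:
+      - accounts
+      - drives
+      - nodes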
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class NetAppElementSWAdminUser(object):
+ """
+ Class to set, modify and delete admin users on ElementSW box
+ """
+
+ def __init__(self):
+ """
+ Initialize the NetAppElementSWAdminUser class.
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ element_username=dict(required=True, type='str'),
+ element_password=dict(required=False, type='str', no_log=True),
+ acceptEula=dict(required=False, type='bool'),
+ access=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ param = self.module.params
+ # set up state variables
+ self.state = param['state']
+ self.element_username = param['element_username']
+ self.element_password = param['element_password']
+ self.acceptEula = param['acceptEula']
+ self.access = param['access']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_admin_users')
+
+ def does_admin_user_exist(self):
+ """
+ Checks to see if an admin user exists or not
+        :return: True if the user exists, False if it does not
+ """
+ admins_list = self.sfe.list_cluster_admins()
+ for admin in admins_list.cluster_admins:
+ if admin.username == self.element_username:
+ return True
+ return False
+
+ def get_admin_user(self):
+ """
+ Get the admin user object
+ :return: the admin user object
+ """
+ admins_list = self.sfe.list_cluster_admins()
+ for admin in admins_list.cluster_admins:
+ if admin.username == self.element_username:
+ return admin
+ return None
+
+ def modify_admin_user(self):
+ """
+        Modify an admin user. If a password is set, the user will be modified, as there is no way to
+        compare a new password with an existing one.
+        :return: True if the user was modified, False otherwise
+ """
+ changed = False
+ admin_user = self.get_admin_user()
+ if self.access is not None and len(self.access) > 0:
+ for access in self.access:
+ if access not in admin_user.access:
+ changed = True
+ if changed and not self.module.check_mode:
+ self.sfe.modify_cluster_admin(cluster_admin_id=admin_user.cluster_admin_id,
+ access=self.access,
+ password=self.element_password,
+ attributes=self.attributes)
+
+ return changed
+
+ def add_admin_user(self):
+ """
+        Adds a new admin user to the Element cluster
+ :return: nothing
+ """
+ self.sfe.add_cluster_admin(username=self.element_username,
+ password=self.element_password,
+ access=self.access,
+ accept_eula=self.acceptEula,
+ attributes=self.attributes)
+
+ def delete_admin_user(self):
+ """
+ Deletes an existing admin user from the element cluster
+ :return: nothing
+ """
+ admin_user = self.get_admin_user()
+ self.sfe.remove_cluster_admin(cluster_admin_id=admin_user.cluster_admin_id)
+
+ def apply(self):
+ """
+ determines which method to call to set, delete or modify admin users
+ :return:
+ """
+ changed = False
+ if self.state == "present":
+ if self.does_admin_user_exist():
+ changed = self.modify_admin_user()
+ else:
+ if not self.module.check_mode:
+ self.add_admin_user()
+ changed = True
+ else:
+ if self.does_admin_user_exist():
+ if not self.module.check_mode:
+ self.delete_admin_user()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppElementSWAdminUser()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py
new file mode 100644
index 00000000..e81e7c5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Backup Manager
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_elementsw_backup
+
+short_description: NetApp Element Software Create Backups
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create backup
+
+options:
+
+ src_volume_id:
+ description:
+ - ID of the backup source volume.
+ required: true
+ aliases:
+ - volume_id
+ type: str
+
+ dest_hostname:
+ description:
+    - hostname for the backup destination cluster
+ - will be set equal to hostname if not specified
+ required: false
+ type: str
+
+ dest_username:
+ description:
+ - username for the backup destination cluster
+ - will be set equal to username if not specified
+ required: false
+ type: str
+
+ dest_password:
+ description:
+ - password for the backup destination cluster
+ - will be set equal to password if not specified
+ required: false
+ type: str
+
+ dest_volume_id:
+ description:
+ - ID of the backup destination volume
+ required: true
+ type: str
+
+ format:
+ description:
+ - Backup format to use
+ choices: ['native','uncompressed']
+ required: false
+ default: 'native'
+ type: str
+
+ script:
+ description:
+ - the backup script to be executed
+ required: false
+ type: str
+
+ script_parameters:
+ description:
+ - the backup script parameters
+ required: false
+ type: dict
+
+'''
+
+EXAMPLES = """
+na_elementsw_backup:
+ hostname: "{{ source_cluster_hostname }}"
+ username: "{{ source_cluster_username }}"
+ password: "{{ source_cluster_password }}"
+ src_volume_id: 1
+ dest_hostname: "{{ destination_cluster_hostname }}"
+ dest_username: "{{ destination_cluster_username }}"
+ dest_password: "{{ destination_cluster_password }}"
+ dest_volume_id: 3
+ format: native
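+
+# Hedged task-style variant (not from the original docs). The dest_* connection
+# options are omitted here, so they default to the source cluster credentials.
+- name: Backup volume 1 to volume 3 on the same cluster
+  na_elementsw_backup:
+    hostname: "{{ source_cluster_hostname }}"
+    username: "{{ source_cluster_username }}"
+    password: "{{ source_cluster_password }}"
+    src_volume_id: 1
+    dest_volume_id: 3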
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+import time
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWBackup(object):
+ ''' class to handle backup operations '''
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and SolidFire connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+
+ src_volume_id=dict(aliases=['volume_id'], required=True, type='str'),
+ dest_hostname=dict(required=False, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True),
+ dest_volume_id=dict(required=True, type='str'),
+ format=dict(required=False, choices=['native', 'uncompressed'], default='native'),
+ script=dict(required=False, type='str'),
+ script_parameters=dict(required=False, type='dict')
+
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=[['script', 'script_parameters']],
+ supports_check_mode=True
+ )
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+
+        # If destination cluster details are not specified, set the destination to be the same as the source
+ if self.module.params["dest_hostname"] is None:
+ self.module.params["dest_hostname"] = self.module.params["hostname"]
+ if self.module.params["dest_username"] is None:
+ self.module.params["dest_username"] = self.module.params["username"]
+ if self.module.params["dest_password"] is None:
+ self.module.params["dest_password"] = self.module.params["password"]
+
+ params = self.module.params
+
+ # establish a connection to both source and destination elementsw clusters
+ self.src_connection = netapp_utils.create_sf_connection(self.module)
+ self.module.params["username"] = params["dest_username"]
+ self.module.params["password"] = params["dest_password"]
+ self.module.params["hostname"] = params["dest_hostname"]
+ self.dest_connection = netapp_utils.create_sf_connection(self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.src_connection)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_backup')
+
+ def apply(self):
+ """
+ Apply backup creation logic
+ """
+ self.create_backup()
+ self.module.exit_json(changed=True)
+
+ def create_backup(self):
+ """
+ Create backup
+ """
+
+ # Start volume write on destination cluster
+
+ try:
+ write_obj = self.dest_connection.start_bulk_volume_write(volume_id=self.module.params["dest_volume_id"],
+ format=self.module.params["format"],
+ attributes=self.attributes)
+ write_key = write_obj.key
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error starting bulk write on destination cluster", exception=to_native(err))
+
+ # Set script parameters if not passed by user
+ # These parameters are equivalent to the options used when a backup is executed via the GUI
+
+ if self.module.params["script"] is None and self.module.params["script_parameters"] is None:
+
+ self.module.params["script"] = 'bv_internal.py'
+ self.module.params["script_parameters"] = {"write": {
+ "mvip": self.module.params["dest_hostname"],
+ "username": self.module.params["dest_username"],
+ "password": self.module.params["dest_password"],
+ "key": write_key,
+ "endpoint": "solidfire",
+ "format": self.module.params["format"]},
+ "range": {"lba": 0, "blocks": 244224}}
+
+ # Start volume read on source cluster
+
+ try:
+ read_obj = self.src_connection.start_bulk_volume_read(self.module.params["src_volume_id"],
+ self.module.params["format"],
+ script=self.module.params["script"],
+ script_parameters=self.module.params["script_parameters"],
+ attributes=self.attributes)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error starting bulk read on source cluster", exception=to_native(err))
+
+ # Poll job status until it has completed
+        # SF will automatically time out if the job is not successful after a certain amount of time
+
+ completed = False
+ while completed is not True:
+ # Sleep between polling iterations to reduce api load
+ time.sleep(2)
+ try:
+ result = self.src_connection.get_async_result(read_obj.async_handle, True)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Unable to check backup job status", exception=to_native(err))
+
+ if result["status"] != 'running':
+ completed = True
+ if 'error' in result:
+ self.module.fail_json(msg=result['error']['message'])
+
+
+def main():
+ """ Run backup operation"""
+ vol_obj = ElementSWBackup()
+ vol_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py
new file mode 100644
index 00000000..2f288dc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_check_connections
+
+short_description: NetApp Element Software Check connectivity to MVIP and SVIP.
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Used to test the management connection to the cluster.
+- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
+
+options:
+
+ skip:
+ description:
+ - Skip checking connection to SVIP or MVIP.
+ choices: ['svip', 'mvip']
+ type: str
+
+ mvip:
+ description:
+ - Optionally, use to test connection of a different MVIP.
+ - This is not needed to test the connection to the target cluster.
+ type: str
+
+ svip:
+ description:
+ - Optionally, use to test connection of a different SVIP.
+ - This is not needed to test the connection to the target cluster.
+ type: str
+
+'''
+
+
+EXAMPLES = """
+ - name: Check connections to MVIP and SVIP
+ na_elementsw_check_connections:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
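+
+  # Hedged example (not from the original docs): skip the SVIP check and test
+  # an alternate MVIP; the address below is a placeholder.
+  - name: Check connection to an alternate MVIP only
+    na_elementsw_check_connections:
+      hostname: "{{ solidfire_hostname }}"
+      username: "{{ solidfire_username }}"
+      password: "{{ solidfire_password }}"
+      skip: svip
+      mvip: 10.10.10.10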
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class NaElementSWConnection(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
+ mvip=dict(required=False, type='str', default=None),
+ svip=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('skip', 'svip', ['mvip']),
+ ('skip', 'mvip', ['svip'])
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.module.params.copy()
+ self.msg = ""
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(self.module, port=442)
+
+ def check_mvip_connection(self):
+ """
+ Check connection to MVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.elem.test_connect_mvip(mvip=self.parameters['mvip'])
+ # Todo - Log details about the test
+ return test.details.connected
+
+ except Exception as e:
+ self.msg += 'Error checking connection to MVIP: %s' % to_native(e)
+ return False
+
+ def check_svip_connection(self):
+ """
+ Check connection to SVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.elem.test_connect_svip(svip=self.parameters['svip'])
+ # Todo - Log details about the test
+ return test.details.connected
+ except Exception as e:
+ self.msg += 'Error checking connection to SVIP: %s' % to_native(e)
+ return False
+
+ def apply(self):
+ passed = False
+ if self.parameters.get('skip') is None:
+ # Set failed and msg
+ passed = self.check_mvip_connection()
+ # check if both connections have passed
+ passed &= self.check_svip_connection()
+ elif self.parameters['skip'] == 'mvip':
+ passed |= self.check_svip_connection()
+ elif self.parameters['skip'] == 'svip':
+ passed |= self.check_mvip_connection()
+ if not passed:
+ self.module.fail_json(msg=self.msg)
+ else:
+ self.module.exit_json()
+
+
+def main():
+ connect_obj = NaElementSWConnection()
+ connect_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py
new file mode 100644
index 00000000..ede60cae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Initialize Cluster
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster
+
+short_description: NetApp Element Software Create Cluster
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Initialize Element Software node ownership to form a cluster.
+ - If the cluster does not exist, username/password are still required but ignored for initial creation.
+ - username/password are used as the node credentials to see if the cluster already exists.
+ - username/password can also be used to set the cluster credentials.
+ - If the cluster already exists, no error is returned, but changed is set to false.
+ - Cluster modifications are not supported and are ignored.
+
+options:
+ management_virtual_ip:
+ description:
+ - Floating (virtual) IP address for the cluster on the management network.
+ required: true
+ type: str
+
+ storage_virtual_ip:
+ description:
+ - Floating (virtual) IP address for the cluster on the storage (iSCSI) network.
+ required: true
+ type: str
+
+ replica_count:
+ description:
+ - Number of replicas of each piece of data to store in the cluster.
+ default: 2
+ type: int
+
+ cluster_admin_username:
+ description:
+ - Username for the cluster admin.
+ - If not provided, default to username.
+ type: str
+
+ cluster_admin_password:
+ description:
+ - Initial password for the cluster admin account.
+ - If not provided, default to password.
+ type: str
+
+ accept_eula:
+ description:
+ - Required to indicate your acceptance of the End User License Agreement when creating this cluster.
+ - To accept the EULA, set this parameter to true.
+ type: bool
+
+ nodes:
+ description:
+ - Storage IP (SIP) addresses of the initial set of nodes making up the cluster.
+    - The IP address of each node must be in the list.
+ required: true
+ type: list
+ elements: str
+
+ attributes:
+ description:
+ - List of name-value pairs in JSON object format.
+ type: dict
+
+ timeout:
+ description:
+ - Time to wait for cluster creation to complete.
+ default: 100
+ type: int
+ version_added: 20.8.0
+
+ fail_if_cluster_already_exists_with_larger_ensemble:
+ description:
+ - If the cluster exists, the default is to verify that I(nodes) is a superset of the existing ensemble.
+ - A superset is accepted because some nodes may have a different role.
+ - But the module reports an error if the existing ensemble contains a node not listed in I(nodes).
+ - This checker is disabled when this option is set to false.
+ default: true
+ type: bool
+ version_added: 20.8.0
+
+ encryption:
+ description: to enable or disable encryption at rest
+ type: bool
+ version_added: 20.10.0
+
+ order_number:
+ description: (experimental) order number as provided by NetApp
+ type: str
+ version_added: 20.10.0
+
+ serial_number:
+ description: (experimental) serial number as provided by NetApp
+ type: str
+ version_added: 20.10.0
+'''
+
+EXAMPLES = """
+
+ - name: Initialize new cluster
+ tags:
+ - elementsw_cluster
+ na_elementsw_cluster:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ management_virtual_ip: 10.226.108.32
+ storage_virtual_ip: 10.226.109.68
+ replica_count: 2
+ accept_eula: true
+ nodes:
+ - 10.226.109.72
+ - 10.226.109.74
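+
+  # Hedged variant (not from the original docs): all values are placeholders.
+  # It enables software encryption at rest and passes the experimental
+  # order/serial numbers described above.
+  - name: Initialize new cluster with encryption at rest
+    na_elementsw_cluster:
+      hostname: "{{ elementsw_hostname }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      management_virtual_ip: 10.226.108.32
+      storage_virtual_ip: 10.226.109.68
+      accept_eula: true
+      encryption: true
+      order_number: "12345"
+      serial_number: "67890"
+      nodes:
+      - 10.226.109.72
+      - 10.226.109.74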
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWCluster(object):
+ """
+ Element Software Initialize node with ownership for cluster formation
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ management_virtual_ip=dict(required=True, type='str'),
+ storage_virtual_ip=dict(required=True, type='str'),
+ replica_count=dict(required=False, type='int', default=2),
+ cluster_admin_username=dict(required=False, type='str'),
+ cluster_admin_password=dict(required=False, type='str', no_log=True),
+ accept_eula=dict(required=False, type='bool'),
+ nodes=dict(required=True, type='list', elements='str'),
+ attributes=dict(required=False, type='dict', default=None),
+ timeout=dict(required=False, type='int', default=100),
+ fail_if_cluster_already_exists_with_larger_ensemble=dict(required=False, type='bool', default=True),
+ encryption=dict(required=False, type='bool'),
+ order_number=dict(required=False, type='str'),
+ serial_number=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.management_virtual_ip = input_params['management_virtual_ip']
+ self.storage_virtual_ip = input_params['storage_virtual_ip']
+ self.replica_count = input_params['replica_count']
+ self.accept_eula = input_params.get('accept_eula')
+ self.attributes = input_params.get('attributes')
+ self.nodes = input_params['nodes']
+ self.cluster_admin_username = input_params['username'] if input_params.get('cluster_admin_username') is None else input_params['cluster_admin_username']
+ self.cluster_admin_password = input_params['password'] if input_params.get('cluster_admin_password') is None else input_params['cluster_admin_password']
+ self.fail_if_cluster_already_exists_with_larger_ensemble = input_params['fail_if_cluster_already_exists_with_larger_ensemble']
+ self.encryption = input_params['encryption']
+ self.order_number = input_params['order_number']
+ self.serial_number = input_params['serial_number']
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+
+ # 442 for node APIs, 443 (default) for cluster APIs
+ for role, port in [('node', 442), ('cluster', 443)]:
+ try:
+ # even though username/password should be optional, create_sf_connection fails if not set
+ conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port, timeout=input_params['timeout'])
+ if role == 'node':
+ self.sfe_node = conn
+ else:
+ self.sfe_cluster = conn
+ except netapp_utils.solidfire.common.ApiConnectionError as exc:
+ if str(exc) == "Bad Credentials":
+ msg = 'Most likely the cluster is already created.'
+                    msg += ' Make sure to use valid %s credentials for username and password.' % ('node' if port == 442 else 'cluster')
+ msg += ' Even though credentials are not required for the first create, they are needed to check whether the cluster already exists.'
+ msg += ' Cluster reported: %s' % repr(exc)
+ else:
+ msg = 'Failed to create connection: %s' % repr(exc)
+ self.module.fail_json(msg=msg)
+ except Exception as exc:
+ self.module.fail_json(msg='Failed to connect: %s' % repr(exc))
+
+ self.elementsw_helper = NaElementSWModule(self.sfe_cluster)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_cluster'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_cluster')
+
+ def get_node_cluster_info(self):
+ """
+ Get Cluster Info - using node API
+ """
+ try:
+ info = self.sfe_node.get_config()
+ self.debug.append(repr(info.config.cluster))
+ return info.config.cluster
+ except Exception as exc:
+ self.debug.append("port: %s, %s" % (str(self.sfe_node._port), repr(exc)))
+ return None
+
+ def check_cluster_exists(self):
+ """
+        Validate whether a cluster already exists with the given list of nodes.
+        Fail the module if the existing ensemble contains nodes not listed in nodes
+        (unless fail_if_cluster_already_exists_with_larger_ensemble is false).
+        :return: True if a cluster was found, False otherwise.
+ """
+ info = self.get_node_cluster_info()
+ if info is None:
+ return False
+ ensemble = getattr(info, 'ensemble', None)
+ if not ensemble:
+ return False
+ # format is 'id:IP'
+ nodes = [x.split(':', 1)[1] for x in ensemble]
+ current_ensemble_nodes = set(nodes) if ensemble else set()
+ requested_nodes = set(self.nodes) if self.nodes else set()
+ extra_ensemble_nodes = current_ensemble_nodes - requested_nodes
+ # TODO: the cluster may have more nodes than what is reported in ensemble:
+ # nodes_not_in_ensemble = requested_nodes - current_ensemble_nodes
+ # So it's OK to find some missing nodes, but not very deterministic.
+ # eg some kind of backup nodes could be in nodes_not_in_ensemble.
+ if extra_ensemble_nodes and self.fail_if_cluster_already_exists_with_larger_ensemble:
+ msg = 'Error: found existing cluster with more nodes in ensemble. Cluster: %s, extra nodes: %s' %\
+ (getattr(info, 'cluster', 'not found'), extra_ensemble_nodes)
+ msg += '. Cluster info: %s' % repr(info)
+ self.module.fail_json(msg=msg)
+ if extra_ensemble_nodes:
+ self.debug.append("Extra ensemble nodes: %s" % extra_ensemble_nodes)
+ nodes_not_in_ensemble = requested_nodes - current_ensemble_nodes
+ if nodes_not_in_ensemble:
+ self.debug.append("Extra requested nodes not in ensemble: %s" % nodes_not_in_ensemble)
+ return True
+
+ def create_cluster_api(self, options):
+ ''' Call send_request directly rather than using the SDK if new fields are present
+ The new SDK will support these in version 1.17 (Nov or Feb)
+ '''
+ extra_options = ['enableSoftwareEncryptionAtRest', 'orderNumber', 'serialNumber']
+ if not any((item in options for item in extra_options)):
+ # use SDK
+ return self.sfe_cluster.create_cluster(**options)
+
+ # call directly the API as the SDK is not updated yet
+ params = {
+ "mvip": options['mvip'],
+ "svip": options['svip'],
+ "repCount": options['rep_count'],
+ "username": options['username'],
+ "password": options['password'],
+ "nodes": options['nodes'],
+ }
+ if options['accept_eula'] is not None:
+ params["acceptEula"] = options['accept_eula']
+ if options['attributes'] is not None:
+ params["attributes"] = options['attributes']
+ for option in extra_options:
+ if options.get(option):
+ params[option] = options[option]
+
+ # There is no adaptor.
+ return self.sfe_cluster.send_request(
+ 'CreateCluster',
+ netapp_utils.solidfire.CreateClusterResult,
+ params,
+ since=None
+ )
+
+ def create_cluster(self):
+ """
+ Create Cluster
+ """
+ options = {
+ 'mvip': self.management_virtual_ip,
+ 'svip': self.storage_virtual_ip,
+ 'rep_count': self.replica_count,
+ 'accept_eula': self.accept_eula,
+ 'nodes': self.nodes,
+ 'attributes': self.attributes,
+ 'username': self.cluster_admin_username,
+ 'password': self.cluster_admin_password
+ }
+ if self.encryption is not None:
+ options['enableSoftwareEncryptionAtRest'] = self.encryption
+ if self.order_number is not None:
+ options['orderNumber'] = self.order_number
+ if self.serial_number is not None:
+ options['serialNumber'] = self.serial_number
+
+ return_msg = 'created'
+ try:
+ # does not work as node even though documentation says otherwise
+ # running as node, this error is reported: 500 xUnknownAPIMethod method=CreateCluster
+ self.create_cluster_api(options)
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+            # not sure how this can happen, but the cluster may already exist
+ if 'xClusterAlreadyCreated' not in str(exc.message):
+ self.module.fail_json(msg='Error creating cluster %s' % to_native(exc), exception=traceback.format_exc())
+ return_msg = 'already_exists: %s' % str(exc.message)
+ except Exception as exc:
+ self.module.fail_json(msg='Error creating cluster %s' % to_native(exc), exception=traceback.format_exc())
+ return return_msg
+
+ def apply(self):
+ """
+ Check connection and initialize node with cluster ownership
+ """
+ changed = False
+ result_message = None
+ exists = self.check_cluster_exists()
+ if exists:
+ result_message = "cluster already exists"
+ else:
+ changed = True
+ if not self.module.check_mode:
+ result_message = self.create_cluster()
+ if result_message.startswith('already_exists:'):
+ changed = False
+ self.module.exit_json(changed=changed, msg=result_message, debug=self.debug)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_cluster = ElementSWCluster()
+ na_elementsw_cluster.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py
new file mode 100644
index 00000000..94b5c17d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Configure cluster
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster_config
+
+short_description: Configure Element SW Cluster
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure Element Software cluster.
+
+options:
+ modify_cluster_full_threshold:
+ description:
+    - The capacity level at which the cluster generates an event.
+    - Requires at least one of stage3_block_threshold_percent, max_metadata_over_provision_factor or stage2_aware_threshold.
+ suboptions:
+ stage3_block_threshold_percent:
+ description:
+ - The percentage below the "Error" threshold that triggers a cluster "Warning" alert
+ type: int
+ max_metadata_over_provision_factor:
+ description:
+ - The number of times metadata space can be overprovisioned relative to the amount of space available
+ type: int
+ stage2_aware_threshold:
+ description:
+ - The number of nodes of capacity remaining in the cluster before the system triggers a notification
+ type: int
+ type: dict
+
+ encryption_at_rest:
+ description:
+ - enable or disable the Advanced Encryption Standard (AES) 256-bit encryption at rest on the cluster
+ choices: ['present', 'absent']
+ type: str
+
+ set_ntp_info:
+ description:
+    - configure NTP on cluster nodes
+ - Requires a list of one or more ntp_servers
+ suboptions:
+ ntp_servers:
+ description:
+        - list of NTP servers to add to each node's NTP configuration
+ type: list
+ elements: str
+ broadcastclient:
+ type: bool
+ default: False
+ description:
+ - Enables every node in the cluster as a broadcast client
+ type: dict
+
+ enable_virtual_volumes:
+ type: bool
+ default: True
+ description:
+ - Enable the NetApp SolidFire VVols cluster feature
+'''
+
+EXAMPLES = """
+
+ - name: Configure cluster
+ tags:
+ - elementsw_cluster_config
+ na_elementsw_cluster_config:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ modify_cluster_full_threshold:
+ stage2_aware_threshold: 2
+ stage3_block_threshold_percent: 10
+ max_metadata_over_provision_factor: 2
+ encryption_at_rest: absent
+ set_ntp_info:
+ broadcastclient: False
+ ntp_servers:
+ - 1.1.1.1
+ - 2.2.2.2
+ enable_virtual_volumes: True
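+
+  # Hedged minimal variant (not from the original docs): only the NTP settings
+  # are changed; note that enable_virtual_volumes defaults to True.
+  - name: Configure NTP only
+    na_elementsw_cluster_config:
+      hostname: "{{ elementsw_hostname }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      set_ntp_info:
+        broadcastclient: False
+        ntp_servers:
+        - 1.1.1.1
+        - 2.2.2.2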
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWClusterConfig(object):
+ """
+ Element Software Configure Element SW Cluster
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+ modify_cluster_full_threshold=dict(
+ type='dict',
+ options=dict(
+ stage2_aware_threshold=dict(type='int', default=None),
+ stage3_block_threshold_percent=dict(type='int', default=None),
+ max_metadata_over_provision_factor=dict(type='int', default=None)
+ )
+ ),
+ encryption_at_rest=dict(type='str', choices=['present', 'absent']),
+ set_ntp_info=dict(
+ type='dict',
+ options=dict(
+ broadcastclient=dict(type='bool', default=False),
+ ntp_servers=dict(type='list', elements='str')
+ )
+ ),
+ enable_virtual_volumes=dict(type='bool', default=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_ntp_details(self):
+ """
+ get ntp info
+ """
+ # Get ntp details
+ ntp_details = self.sfe.get_ntp_info()
+ return ntp_details
+
+ def cmp(self, provided_ntp_servers, existing_ntp_servers):
+        # As Python 3 doesn't have a default cmp function, define one manually to provide the same functionality.
+ return (provided_ntp_servers > existing_ntp_servers) - (provided_ntp_servers < existing_ntp_servers)
+
+ def get_cluster_details(self):
+ """
+ get cluster info
+ """
+ cluster_details = self.sfe.get_cluster_info()
+ return cluster_details
+
+ def get_vvols_status(self):
+ """
+ get vvols status
+ """
+ feature_status = self.sfe.get_feature_status(feature='vvols')
+ if feature_status is not None:
+ return feature_status.features[0].enabled
+ return None
+
+ def get_cluster_full_threshold_status(self):
+ """
+ get cluster full threshold
+ """
+ cluster_full_threshold_status = self.sfe.get_cluster_full_threshold()
+ return cluster_full_threshold_status
+
+ def setup_ntp_info(self, servers, broadcastclient=None):
+ """
+ configure ntp
+ """
+ # Set ntp servers
+ try:
+ self.sfe.set_ntp_info(servers, broadcastclient)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error configuring ntp %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def set_encryption_at_rest(self, state=None):
+ """
+ enable/disable encryption at rest
+ """
+ try:
+ if state == 'present':
+ encryption_state = 'enable'
+ self.sfe.enable_encryption_at_rest()
+ elif state == 'absent':
+ encryption_state = 'disable'
+ self.sfe.disable_encryption_at_rest()
+ except Exception as exception_object:
+ self.module.fail_json(msg='Failed to %s rest encryption %s' % (encryption_state,
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def enable_feature(self, feature):
+ """
+ enable feature
+ """
+ try:
+ self.sfe.enable_feature(feature=feature)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error enabling %s %s' % (feature, to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def set_cluster_full_threshold(self, stage2_aware_threshold=None,
+ stage3_block_threshold_percent=None,
+ max_metadata_over_provision_factor=None):
+ """
+ modify cluster full threshold
+ """
+ try:
+ self.sfe.modify_cluster_full_threshold(stage2_aware_threshold=stage2_aware_threshold,
+ stage3_block_threshold_percent=stage3_block_threshold_percent,
+ max_metadata_over_provision_factor=max_metadata_over_provision_factor)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Failed to modify cluster full threshold %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Cluster configuration
+ """
+ changed = False
+ result_message = None
+
+ if self.parameters.get('modify_cluster_full_threshold') is not None:
+ # get cluster full threshold
+ cluster_full_threshold_details = self.get_cluster_full_threshold_status()
+ # maxMetadataOverProvisionFactor
+ current_mmopf = cluster_full_threshold_details.max_metadata_over_provision_factor
+ # stage3BlockThresholdPercent
+ current_s3btp = cluster_full_threshold_details.stage3_block_threshold_percent
+ # stage2AwareThreshold
+ current_s2at = cluster_full_threshold_details.stage2_aware_threshold
+
+ # is cluster full threshold state change required?
+ if self.parameters.get("modify_cluster_full_threshold")['max_metadata_over_provision_factor'] is not None and \
+ current_mmopf != self.parameters['modify_cluster_full_threshold']['max_metadata_over_provision_factor'] or \
+ self.parameters.get("modify_cluster_full_threshold")['stage3_block_threshold_percent'] is not None and \
+ current_s3btp != self.parameters['modify_cluster_full_threshold']['stage3_block_threshold_percent'] or \
+ self.parameters.get("modify_cluster_full_threshold")['stage2_aware_threshold'] is not None and \
+ current_s2at != self.parameters['modify_cluster_full_threshold']['stage2_aware_threshold']:
+ changed = True
+ self.set_cluster_full_threshold(self.parameters['modify_cluster_full_threshold']['stage2_aware_threshold'],
+ self.parameters['modify_cluster_full_threshold']['stage3_block_threshold_percent'],
+ self.parameters['modify_cluster_full_threshold']['max_metadata_over_provision_factor'])
+
+ if self.parameters.get('encryption_at_rest') is not None:
+ # get all cluster info
+ cluster_info = self.get_cluster_details()
+ # register rest state
+ current_encryption_at_rest_state = cluster_info.cluster_info.encryption_at_rest_state
+
+ # is encryption state change required?
+ if current_encryption_at_rest_state == 'disabled' and self.parameters['encryption_at_rest'] == 'present' or \
+ current_encryption_at_rest_state == 'enabled' and self.parameters['encryption_at_rest'] == 'absent':
+ changed = True
+ self.set_encryption_at_rest(self.parameters['encryption_at_rest'])
+
+ if self.parameters.get('set_ntp_info') is not None:
+ # get all ntp details
+ ntp_details = self.get_ntp_details()
+ # register list of ntp servers
+ ntp_servers = ntp_details.servers
+ # broadcastclient
+ broadcast_client = ntp_details.broadcastclient
+
+ # has either the broadcastclient or the ntp server list changed?
+
+ if self.parameters.get('set_ntp_info')['broadcastclient'] != broadcast_client or \
+ self.cmp(self.parameters.get('set_ntp_info')['ntp_servers'], ntp_servers) != 0:
+ changed = True
+ self.setup_ntp_info(self.parameters.get('set_ntp_info')['ntp_servers'],
+ self.parameters.get('set_ntp_info')['broadcastclient'])
+
+ if self.parameters.get('enable_virtual_volumes') is not None:
+ # check vvols status
+ current_vvols_status = self.get_vvols_status()
+
+ # has the vvols state changed?
+ if current_vvols_status is False and self.parameters.get('enable_virtual_volumes') is True:
+ changed = True
+ self.enable_feature('vvols')
+ elif current_vvols_status is True and self.parameters.get('enable_virtual_volumes') is not True:
+ # vvols, once enabled, cannot be disabled
+ self.module.fail_json(msg='Error disabling vvols: this feature cannot be undone')
+
+ if self.module.check_mode is True:
+ result_message = "Check mode, skipping changes"
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_cluster_config = ElementSWClusterConfig()
+ na_elementsw_cluster_config.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py
new file mode 100644
index 00000000..af064e21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster_pair
+
+short_description: NetApp Element Software Manage Cluster Pair
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete cluster pair
+
+options:
+
+ state:
+ description:
+ - Whether the specified cluster pair should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ dest_mvip:
+ description:
+ - Destination IP address of the cluster to be paired.
+ required: true
+ type: str
+
+ dest_username:
+ description:
+ - Destination username for the cluster to be paired.
+    - Optional if this is the same as the source cluster username.
+ type: str
+
+ dest_password:
+ description:
+ - Destination password for the cluster to be paired.
+    - Optional if this is the same as the source cluster password.
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Create cluster pair
+ na_elementsw_cluster_pair:
+ hostname: "{{ src_hostname }}"
+ username: "{{ src_username }}"
+ password: "{{ src_password }}"
+ state: present
+ dest_mvip: "{{ dest_hostname }}"
+
+ - name: Delete cluster pair
+ na_elementsw_cluster_pair:
+ hostname: "{{ src_hostname }}"
+ username: "{{ src_username }}"
+ password: "{{ src_password }}"
+ state: absent
+ dest_mvip: "{{ dest_hostname }}"
+ dest_username: "{{ dest_username }}"
+ dest_password: "{{ dest_password }}"
+
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWClusterPair(object):
+ """ class to handle cluster pairing operations """
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and ElementSW connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ dest_mvip=dict(required=True, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.elem)
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # get element_sw_connection for destination cluster
+ # overwrite existing source host, user and password with destination credentials
+ self.module.params['hostname'] = self.parameters['dest_mvip']
+        # username and password are the same as the source,
+ # if dest_username and dest_password aren't specified
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_elem = netapp_utils.create_sf_connection(module=self.module)
+ self.dest_elementsw_helper = NaElementSWModule(self.dest_elem)
+
+ def check_if_already_paired(self, paired_clusters, hostname):
+ for pair in paired_clusters.cluster_pairs:
+ if pair.mvip == hostname:
+ return pair.cluster_pair_id
+ return None
+
+ def get_src_pair_id(self):
+ """
+ Check for idempotency
+ """
+ # src cluster and dest cluster exist
+ paired_clusters = self.elem.list_cluster_pairs()
+ return self.check_if_already_paired(paired_clusters, self.parameters['dest_mvip'])
+
+ def get_dest_pair_id(self):
+ """
+ Getting destination cluster_pair_id
+ """
+ paired_clusters = self.dest_elem.list_cluster_pairs()
+ return self.check_if_already_paired(paired_clusters, self.parameters['hostname'])
+
+ def pair_clusters(self):
+ """
+ Start cluster pairing on source, and complete on target cluster
+ """
+ try:
+ pair_key = self.elem.start_cluster_pairing()
+ self.dest_elem.complete_cluster_pairing(
+ cluster_pairing_key=pair_key.cluster_pairing_key)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error pairing cluster %s and %s"
+ % (self.parameters['hostname'],
+ self.parameters['dest_mvip']),
+ exception=to_native(err))
+
+ def unpair_clusters(self, pair_id_source, pair_id_dest):
+ """
+ Delete cluster pair
+ """
+ try:
+ self.elem.remove_cluster_pair(cluster_pair_id=pair_id_source)
+ self.dest_elem.remove_cluster_pair(cluster_pair_id=pair_id_dest)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error unpairing cluster %s and %s"
+ % (self.parameters['hostname'],
+ self.parameters['dest_mvip']),
+ exception=to_native(err))
+
+ def apply(self):
+ """
+ Call create / delete cluster pair methods
+ """
+ pair_id_source = self.get_src_pair_id()
+ # If already paired, find the cluster_pair_id of destination cluster
+ if pair_id_source:
+ pair_id_dest = self.get_dest_pair_id()
+ # calling helper to determine action
+ cd_action = self.na_helper.get_cd_action(pair_id_source, self.parameters)
+ if cd_action == "create":
+ self.pair_clusters()
+ elif cd_action == "delete":
+ self.unpair_clusters(pair_id_source, pair_id_dest)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Apply cluster pair actions """
+ cluster_obj = ElementSWClusterPair()
+ cluster_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py
new file mode 100644
index 00000000..84770019
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Configure SNMP
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster_snmp
+
+short_description: Configure Element SW Cluster SNMP
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure Element Software cluster SNMP.
+
+options:
+
+ state:
+ description:
+    - This module lets you enable SNMP on cluster nodes. When you enable SNMP, \
+ the action applies to all nodes in the cluster, and the values that are passed replace, \
+ in whole, all values set in any previous call to this module.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ snmp_v3_enabled:
+ description:
+    - Whether SNMP v3 should be enabled.
+ type: bool
+
+ networks:
+ description:
+ - List of networks and what type of access they have to the SNMP servers running on the cluster nodes.
+ - This parameter is required if SNMP v3 is disabled.
+ suboptions:
+ access:
+ description:
+ - ro for read-only access.
+ - rw for read-write access.
+ - rosys for read-only access to a restricted set of system information.
+ choices: ['ro', 'rw', 'rosys']
+ type: str
+ cidr:
+ description:
+ - A CIDR network mask. This network mask must be an integer greater than or equal to 0, \
+ and less than or equal to 32. It must also not be equal to 31.
+ type: int
+ community:
+ description:
+ - SNMP community string.
+ type: str
+ network:
+ description:
+ - This parameter along with the cidr variable is used to control which network the access and \
+ community string apply to.
+ - The special value of 'default' is used to specify an entry that applies to all networks.
+ - The cidr mask is ignored when network value is either a host name or default.
+ type: str
+ type: dict
+
+ usm_users:
+ description:
+ - List of users and the type of access they have to the SNMP servers running on the cluster nodes.
+ - This parameter is required if SNMP v3 is enabled.
+ suboptions:
+ access:
+ description:
+ - rouser for read-only access.
+ - rwuser for read-write access.
+ - rosys for read-only access to a restricted set of system information.
+ choices: ['rouser', 'rwuser', 'rosys']
+ type: str
+ name:
+ description:
+ - The name of the user. Must contain at least one character, but no more than 32 characters.
+ - Blank spaces are not allowed.
+ type: str
+ password:
+ description:
+ - The password of the user. Must be between 8 and 255 characters long (inclusive).
+ - Blank spaces are not allowed.
+        - Required if 'secLevel' is 'auth' or 'priv'.
+ type: str
+ passphrase:
+ description:
+ - The passphrase of the user. Must be between 8 and 255 characters long (inclusive).
+ - Blank spaces are not allowed.
+        - Required if 'secLevel' is 'priv'.
+ type: str
+ secLevel:
+ description:
+ - To define the security level of a user.
+ choices: ['noauth', 'auth', 'priv']
+ type: str
+ type: dict
+
+'''
+
+EXAMPLES = """
+
+ - name: configure SnmpNetwork
+ tags:
+ - elementsw_cluster_snmp
+ na_elementsw_cluster_snmp:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ snmp_v3_enabled: True
+ usm_users:
+ access: rouser
+ name: testuser
+ password: ChangeMe123
+ passphrase: ChangeMe123
+ secLevel: auth
+ networks:
+ access: ro
+ cidr: 24
+ community: TestNetwork
+ network: 192.168.0.1
+
+ - name: Disable SnmpNetwork
+ tags:
+ - elementsw_cluster_snmp
+ na_elementsw_cluster_snmp:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+
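+  # Illustrative example only - SNMP v2 style configuration; when snmp_v3_enabled
+  # is false the networks option is required and usm_users is not used.
+  - name: configure SnmpNetwork with SNMP v2 only
+    tags:
+      - elementsw_cluster_snmp
+    na_elementsw_cluster_snmp:
+      hostname: "{{ elementsw_hostname }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      state: present
+      snmp_v3_enabled: False
+      networks:
+        access: ro
+        cidr: 24
+        community: TestNetwork
+        network: 192.168.0.1
+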
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWClusterSnmp(object):
+ """
+    Element Software - configure cluster SNMP (networks and USM users)
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ snmp_v3_enabled=dict(type='bool'),
+ networks=dict(
+ type='dict',
+ options=dict(
+ access=dict(type='str', choices=['ro', 'rw', 'rosys']),
+ cidr=dict(type='int', default=None),
+ community=dict(type='str', default=None),
+ network=dict(type='str', default=None)
+ )
+ ),
+ usm_users=dict(
+ type='dict',
+ options=dict(
+ access=dict(type='str', choices=['rouser', 'rwuser', 'rosys']),
+ name=dict(type='str', default=None),
+ password=dict(type='str', default=None, no_log=True),
+ passphrase=dict(type='str', default=None, no_log=True),
+ secLevel=dict(type='str', choices=['auth', 'noauth', 'priv'])
+ )
+ ),
+ ))
+
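+        # Parameter dependencies enforced via required_if below:
+        # - state=present requires snmp_v3_enabled
+        # - snmp_v3_enabled=True requires usm_users (SNMP v3)
+        # - snmp_v3_enabled=False requires networks (SNMP v2)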
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['snmp_v3_enabled']),
+ ('snmp_v3_enabled', True, ['usm_users']),
+ ('snmp_v3_enabled', False, ['networks'])
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if self.parameters.get('state') == "present":
+ if self.parameters.get('usm_users') is not None:
+ # Getting the configuration details to configure SNMP Version3
+ self.access_usm = self.parameters.get('usm_users')['access']
+ self.name = self.parameters.get('usm_users')['name']
+ self.password = self.parameters.get('usm_users')['password']
+ self.passphrase = self.parameters.get('usm_users')['passphrase']
+ self.secLevel = self.parameters.get('usm_users')['secLevel']
+ if self.parameters.get('networks') is not None:
+ # Getting the configuration details to configure SNMP Version2
+ self.access_network = self.parameters.get('networks')['access']
+ self.cidr = self.parameters.get('networks')['cidr']
+ self.community = self.parameters.get('networks')['community']
+ self.network = self.parameters.get('networks')['network']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def enable_snmp(self):
+ """
+ enable snmp feature
+ """
+ try:
+ self.sfe.enable_snmp(snmp_v3_enabled=self.parameters.get('snmp_v3_enabled'))
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error enabling snmp feature %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def disable_snmp(self):
+ """
+ disable snmp feature
+ """
+ try:
+ self.sfe.disable_snmp()
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error disabling snmp feature %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def configure_snmp(self, actual_networks, actual_usm_users):
+ """
+ Configure snmp
+ """
+ try:
+ self.sfe.set_snmp_acl(networks=[actual_networks], usm_users=[actual_usm_users])
+
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error Configuring snmp feature %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Cluster SNMP configuration
+ """
+ changed = False
+ result_message = None
+ update_required = False
+ version_change = False
+ is_snmp_enabled = self.sfe.get_snmp_state().enabled
+
+ if is_snmp_enabled is True:
+ # IF SNMP is already enabled
+ if self.parameters.get('state') == 'absent':
+ # Checking for state change(s) here, and applying it later in the code allows us to support
+ # check_mode
+ changed = True
+
+ elif self.parameters.get('state') == 'present':
+ # Checking if SNMP configuration needs to be updated,
+ is_snmp_v3_enabled = self.sfe.get_snmp_state().snmp_v3_enabled
+
+ if is_snmp_v3_enabled != self.parameters.get('snmp_v3_enabled'):
+                # Checking if there are any version changes required
+ version_change = True
+ changed = True
+
+ if is_snmp_v3_enabled is True:
+ # Checking If snmp configuration for usm_users needs modification
+ if len(self.sfe.get_snmp_info().usm_users) == 0:
+ # If snmp is getting configured for first time
+ update_required = True
+ changed = True
+ else:
+ for usm_user in self.sfe.get_snmp_info().usm_users:
+ if usm_user.access != self.access_usm or usm_user.name != self.name or usm_user.password != self.password or \
+ usm_user.passphrase != self.passphrase or usm_user.sec_level != self.secLevel:
+ update_required = True
+ changed = True
+ else:
+ # Checking If snmp configuration for networks needs modification
+ for snmp_network in self.sfe.get_snmp_info().networks:
+ if snmp_network.access != self.access_network or snmp_network.cidr != self.cidr or \
+ snmp_network.community != self.community or snmp_network.network != self.network:
+ update_required = True
+ changed = True
+
+ else:
+ if self.parameters.get('state') == 'present':
+ changed = True
+
+ result_message = ""
+
+ if changed:
+ if self.module.check_mode is True:
+ result_message = "Check mode, skipping changes"
+
+ else:
+ if self.parameters.get('state') == "present":
+ # IF snmp is not enabled, then enable and configure snmp
+ if self.parameters.get('snmp_v3_enabled') is True:
+ # IF SNMP is enabled with version 3
+ usm_users = {'access': self.access_usm,
+ 'name': self.name,
+ 'password': self.password,
+ 'passphrase': self.passphrase,
+ 'secLevel': self.secLevel}
+ networks = None
+ else:
+ # IF SNMP is enabled with version 2
+ usm_users = None
+ networks = {'access': self.access_network,
+ 'cidr': self.cidr,
+ 'community': self.community,
+ 'network': self.network}
+
+ if is_snmp_enabled is False or version_change is True:
+ # Enable and configure snmp
+ self.enable_snmp()
+ self.configure_snmp(networks, usm_users)
+ result_message = "SNMP is enabled and configured"
+
+ elif update_required is True:
+ # If snmp is already enabled, update the configuration if required
+ self.configure_snmp(networks, usm_users)
+ result_message = "SNMP is configured"
+
+ elif is_snmp_enabled is True and self.parameters.get('state') == "absent":
+ # If snmp is enabled and state is absent, disable snmp
+ self.disable_snmp()
+ result_message = "SNMP is disabled"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_cluster_snmp = ElementSWClusterSnmp()
+ na_elementsw_cluster_snmp.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py
new file mode 100644
index 00000000..20a9e091
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Node Drives
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_drive
+
+short_description: NetApp Element Software Manage Node Drives
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Add, Erase or Remove drive for nodes on Element Software Cluster.
+
+options:
+ drive_ids:
+ description:
+ - List of Drive IDs or Serial Names of Node drives.
+    - If not specified, the add and remove actions are performed on all drives of node_ids.
+ type: list
+ elements: str
+ aliases: ['drive_id']
+
+ state:
+ description:
+ - Element SW Storage Drive operation state.
+ - present - To add drive of node to participate in cluster data storage.
+ - absent - To remove the drive from being part of active cluster.
+    - clean - Clean up any residual data remaining on a *removed* drive in a secure manner.
+ choices: ['present', 'absent', 'clean']
+ default: 'present'
+ type: str
+
+ node_ids:
+ description:
+ - List of IDs or Names of cluster nodes.
+ - If node_ids and drive_ids are not specified, all available drives in the cluster are added if state is present.
+ - If node_ids and drive_ids are not specified, all active drives in the cluster are removed if state is absent.
+ required: false
+ type: list
+ elements: str
+ aliases: ['node_id']
+
+ force_during_upgrade:
+ description:
+ - Flag to force drive operation during upgrade.
+    type: bool
+
+ force_during_bin_sync:
+ description:
+ - Flag to force during a bin sync operation.
+    type: bool
+'''
+
+EXAMPLES = """
+ - name: Add drive with status available to cluster
+ tags:
+ - elementsw_add_drive
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J3221807
+ force_during_upgrade: false
+ force_during_bin_sync: false
+ node_ids: sf4805-meg-03
+
+ - name: Remove active drive from cluster
+ tags:
+ - elementsw_remove_drive
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ force_during_upgrade: false
+ drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J321208
+
+ - name: Secure Erase drive
+ tags:
+ - elemensw_clean_drive
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: clean
+ drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J432109
+ node_ids: sf4805-meg-03
+
+ - name: Add all the drives of all nodes to cluster
+ tags:
+ - elementsw_add_node
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ force_during_upgrade: false
+ force_during_bin_sync: false
+
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWDrive(object):
+ """
+ Element Software Storage Drive operations
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent', 'clean'], default='present'),
+ drive_ids=dict(required=False, type='list', elements='str', aliases=['drive_id']),
+ node_ids=dict(required=False, type='list', elements='str', aliases=['node_id']),
+ force_during_upgrade=dict(required=False, type='bool'),
+ force_during_bin_sync=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.state = input_params['state']
+ self.drive_ids = input_params['drive_ids']
+ self.node_ids = input_params['node_ids']
+ self.force_during_upgrade = input_params['force_during_upgrade']
+ self.force_during_bin_sync = input_params['force_during_bin_sync']
+ self.list_nodes = None
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ # increase timeout, as removing a disk takes some time
+ self.sfe = netapp_utils.create_sf_connection(module=self.module, timeout=120)
+
+ def get_node_id(self, node_id):
+ """
+ Get Node ID
+ :description: Find and retrieve node_id from the active cluster
+
+        :return: node_id (the module fails if the node is not found)
+ :rtype: node_id
+ """
+ if self.list_nodes is None:
+ self.list_nodes = self.sfe.list_active_nodes()
+ for current_node in self.list_nodes.nodes:
+ if node_id == str(current_node.node_id):
+ return current_node.node_id
+ elif node_id == current_node.name:
+ return current_node.node_id
+ self.module.fail_json(msg='unable to find node for node_id=%s' % node_id)
+
+ def get_drives_listby_status(self, node_num_ids):
+ """
+ Capture list of drives based on status for a given node_id
+ :description: Capture list of active, failed and available drives from a given node_id
+
+ :return: None
+ """
+ self.active_drives = dict()
+ self.available_drives = dict()
+ self.other_drives = dict()
+ self.all_drives = self.sfe.list_drives()
+
+ for drive in self.all_drives.drives:
+ # get all drives if no node is given, or match the node_ids
+ if node_num_ids is None or drive.node_id in node_num_ids:
+ if drive.status in ['active', 'failed']:
+ self.active_drives[drive.serial] = drive.drive_id
+ elif drive.status == "available":
+ self.available_drives[drive.serial] = drive.drive_id
+ else:
+ self.other_drives[drive.serial] = (drive.drive_id, drive.status)
+
+ self.debug.append('available: %s' % self.available_drives)
+ self.debug.append('active: %s' % self.active_drives)
+ self.debug.append('other: %s' % self.other_drives)
+
+ def get_drive_id(self, drive_id, node_num_ids):
+ """
+ Get Drive ID
+ :description: Find and retrieve drive_id from the active cluster
+ Assumes self.all_drives is already populated
+
+        :return: (drive_id, status) tuple (the module fails if the drive is not found)
+        :rtype: tuple
+ """
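+        # Python for/else: the else branch below runs only when the loop completes
+        # without a break, i.e. no drive matched the given id or serial.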
+ for drive in self.all_drives.drives:
+ if drive_id == str(drive.drive_id):
+ break
+ if drive_id == drive.serial:
+ break
+ else:
+ self.module.fail_json(msg='unable to find drive for drive_id=%s. Debug=%s' % (drive_id, self.debug))
+ if node_num_ids and drive.node_id not in node_num_ids:
+ self.module.fail_json(msg='drive for drive_id=%s belongs to another node, with node_id=%d. Debug=%s' % (drive_id, drive.node_id, self.debug))
+ return drive.drive_id, drive.status
+
+ def get_active_drives(self, drives):
+ """
+ return a list of active drives
+ if drives is specified, only [] or a subset of disks in drives are returned
+        else all active drives for this node or cluster are returned
+ """
+ if drives is None:
+ return list(self.active_drives.values())
+ return [drive_id for drive_id, status in drives if status in ['active', 'failed']]
+
+ def get_available_drives(self, drives, action):
+ """
+ return a list of available drives (not active)
+ if drives is specified, only [] or a subset of disks in drives are returned
+ else all available drives for this node or cluster are returned
+ """
+ if drives is None:
+ return list(self.available_drives.values())
+ action_list = list()
+ for drive_id, drive_status in drives:
+ if drive_status == 'available':
+ action_list.append(drive_id)
+ elif drive_status in ['active', 'failed']:
+ # already added
+ pass
+ elif drive_status == 'erasing' and action == 'erase':
+ # already erasing
+ pass
+ elif drive_status == 'removing':
+ self.module.fail_json(msg='Error - cannot %s drive while it is being removed. Debug: %s' % (action, self.debug))
+ elif drive_status == 'erasing' and action == 'add':
+ self.module.fail_json(msg='Error - cannot %s drive while it is being erased. Debug: %s' % (action, self.debug))
+ else:
+ self.module.fail_json(msg='Error - cannot %s drive while it is in %s state. Debug: %s' % (action, drive_status, self.debug))
+ return action_list
+
+ def add_drive(self, drives=None):
+ """
+ Add Drive available for Cluster storage expansion
+ """
+ try:
+ self.sfe.add_drives(drives,
+ force_during_upgrade=self.force_during_upgrade,
+ force_during_bin_sync=self.force_during_bin_sync)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error adding drive%s: %s: %s' %
+ ('s' if len(drives) > 1 else '',
+ str(drives),
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def remove_drive(self, drives=None):
+ """
+ Remove Drive active in Cluster
+ """
+ try:
+ self.sfe.remove_drives(drives,
+ force_during_upgrade=self.force_during_upgrade)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error removing drive%s: %s: %s' %
+ ('s' if len(drives) > 1 else '',
+ str(drives),
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def secure_erase(self, drives=None):
+ """
+ Secure Erase any residual data existing on a drive
+ """
+ try:
+ self.sfe.secure_erase_drives(drives)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error cleaning data from drive%s: %s: %s' %
+ ('s' if len(drives) > 1 else '',
+ str(drives),
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check, process and initiate Drive operation
+ """
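+        # Resolve node names/ids first, bucket the cluster drives by status, then
+        # build the list of drive ids the requested state needs to act on.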
+ changed = False
+
+ action_list = []
+ node_num_ids = None
+ drives = None
+ if self.node_ids:
+ node_num_ids = [self.get_node_id(node_id) for node_id in self.node_ids]
+
+ self.get_drives_listby_status(node_num_ids)
+ if self.drive_ids:
+ drives = [self.get_drive_id(drive_id, node_num_ids) for drive_id in self.drive_ids]
+
+ if self.state == "present":
+ action_list = self.get_available_drives(drives, 'add')
+ elif self.state == "absent":
+ action_list = self.get_active_drives(drives)
+ elif self.state == "clean":
+ action_list = self.get_available_drives(drives, 'erase')
+
+ if len(action_list) > 0:
+ changed = True
+ if not self.module.check_mode and changed:
+ if self.state == "present":
+ self.add_drive(action_list)
+ elif self.state == "absent":
+ self.remove_drive(action_list)
+ elif self.state == "clean":
+ self.secure_erase(action_list)
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+
+ na_elementsw_drive = ElementSWDrive()
+ na_elementsw_drive.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py
new file mode 100644
index 00000000..aca2a914
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Info
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_info
+short_description: NetApp Element Software Info
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 20.10.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Collect cluster and node information.
+  - Use an MVIP as hostname for cluster and node scope.
+  - Use an MIP as hostname for node scope.
+  - When using MIPs, cluster APIs are expected to fail with 'xUnknownAPIMethod method=ListAccounts'.
+
+options:
+ gather_subsets:
+ description:
+ - list of subsets to gather from target cluster or node
+ - supported values
+ - node_config, cluster_accounts
+ - additional values
+ - all - for all subsets,
+ - all_clusters - all subsets at cluster scope,
+ - all_nodes - all subsets at node scope
+ type: list
+ elements: str
+ default: ['all']
+ aliases: ['gather_subset']
+
+ filter:
+ description:
+ - When a list of records is returned, this can be used to limit the records to be returned.
+ - If more than one key is used, all keys must match.
+ type: dict
+
+ fail_on_error:
+ description:
+      - by default, errors are not fatal when collecting a subset. The failed subset shows an error message in the info output.
+ - if set to True, the module fails on the first error.
+ type: bool
+ default: false
+
+ fail_on_key_not_found:
+ description:
+ - force an error when filter is used and a key is not present in records.
+ type: bool
+ default: true
+
+ fail_on_record_not_found:
+ description:
+ - force an error when filter is used and no record is matched.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = """
+
+ - name: get all available subsets
+ na_elementsw_info:
+ hostname: "{{ elementsw_mvip }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ gather_subsets: all
+ register: result
+
+ - name: collect data for elementsw accounts using a filter
+ na_elementsw_info:
+ hostname: "{{ elementsw_mvip }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ gather_subsets: 'cluster_accounts'
+ filter:
+ username: "{{ username_to_find }}"
+ register: result
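+
+  # Illustrative example only - node scope collection against a node management IP;
+  # the elementsw_mip variable name is a placeholder.
+  - name: collect node configuration using a node MIP
+    na_elementsw_info:
+      hostname: "{{ elementsw_mip }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      gather_subsets: all_nodes
+    register: result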
+"""
+
+RETURN = """
+
+info:
+ description:
+ - a dictionary of collected subsets
+    - each subset is in JSON format
+ returned: success
+ type: dict
+
+debug:
+ description:
+ - a list of detailed error messages if some subsets cannot be collected
+ returned: success
+ type: list
+
+"""
+from ansible.module_utils.basic import AnsibleModule
+
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWInfo(object):
+ '''
+    Element Software Info - collect cluster and node information
+ '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ gather_subsets=dict(type='list', elements='str', aliases=['gather_subset'], default='all'),
+ filter=dict(type='dict'),
+ fail_on_error=dict(type='bool', default=False),
+ fail_on_key_not_found=dict(type='bool', default=True),
+ fail_on_record_not_found=dict(type='bool', default=False),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+
+ # 442 for node APIs, 443 (default) for cluster APIs
+ for role, port in [('node', 442), ('cluster', 443)]:
+ try:
+ conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port)
+ if role == 'node':
+ self.sfe_node = conn
+ else:
+ self.sfe_cluster = conn
+ except netapp_utils.solidfire.common.ApiConnectionError as exc:
+ if str(exc) == "Bad Credentials":
+                    msg = 'Make sure to use valid %s credentials for username and password.' % ('node' if port == 442 else 'cluster')
+                    msg += ' %s reported: %s' % ('Node' if port == 442 else 'Cluster', repr(exc))
+ else:
+ msg = 'Failed to create connection for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc))
+ self.module.fail_json(msg=msg)
+ except Exception as exc:
+ self.module.fail_json(msg='Failed to connect for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc)))
+
+ # TODO: add new node methods here
+ self.node_methods = dict(
+ node_config=self.sfe_node.get_config,
+ )
+ # TODO: add new cluster methods here
+ self.cluster_methods = dict(
+ cluster_accounts=self.sfe_cluster.list_accounts
+ )
+ self.methods = dict(self.node_methods)
+ self.methods.update(self.cluster_methods)
+
+ # add telemetry attributes - does not matter if we are using cluster or node here
+ # TODO: most if not all get and list APIs do not have an attributes parameter
+
+ def get_info(self, name):
+ '''
+ Get Element Info
+ run a cluster or node list method
+ return output as json
+ '''
+ info = None
+ if name not in self.methods:
+ msg = 'Error: unknown subset %s.' % name
+ msg += ' Known_subsets: %s' % ', '.join(self.methods.keys())
+ self.module.fail_json(msg=msg, debug=self.debug)
+ try:
+ info = self.methods[name]()
+ return info.to_json()
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ if 'err_json=500 xUnknownAPIMethod method=' in str(exc):
+ info = 'Error (API not in scope?)'
+ else:
+ info = 'Error'
+ msg = '%s for subset: %s: %s' % (info, name, repr(exc))
+ if self.parameters['fail_on_error']:
+ self.module.fail_json(msg=msg)
+ self.debug.append(msg)
+ return info
+
+ def filter_list_of_dict_by_key(self, records, key, value):
+ matched = list()
+ for record in records:
+ if key in record and record[key] == value:
+ matched.append(record)
+ if key not in record and self.parameters['fail_on_key_not_found']:
+ msg = 'Error: key %s not found in %s' % (key, repr(record))
+ self.module.fail_json(msg=msg)
+ return matched
+
+ def filter_records(self, records, filter_dict):
+
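+        # get_info() returns each subset as a single-key dict (for instance
+        # {'accounts': [...]}); keep the wrapper key and filter the inner list.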
+ if isinstance(records, dict):
+ if len(records) == 1:
+ key, value = list(records.items())[0]
+ return dict({key: self.filter_records(value, filter_dict)})
+ if not isinstance(records, list):
+ return records
+ matched = records
+ for key, value in filter_dict.items():
+ matched = self.filter_list_of_dict_by_key(matched, key, value)
+ if self.parameters['fail_on_record_not_found'] and len(matched) == 0:
+ msg = 'Error: no match for %s out of %d records' % (repr(self.parameters['filter']), len(records))
+ self.debug.append('Unmatched records: %s' % repr(records))
+ self.module.fail_json(msg=msg, debug=self.debug)
+ return matched
+
+ def get_and_filter_info(self, name):
+ '''
+ Get data
+ If filter is present, only return the records that are matched
+ return output as json
+ '''
+ records = self.get_info(name)
+ if self.parameters.get('filter') is None:
+ return records
+ matched = self.filter_records(records, self.parameters.get('filter'))
+ return matched
+
+ def apply(self):
+ '''
+        Collect the requested subsets and return them as info
+ '''
+ changed = False
+ info = dict()
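+        # 'all', 'all_clusters' and 'all_nodes' are exclusive shortcuts; they expand
+        # to the method tables built in __init__ and cannot be mixed with other subsets.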
+ my_subsets = ('all', 'all_clusters', 'all_nodes')
+ if any(x in self.parameters['gather_subsets'] for x in my_subsets) and len(self.parameters['gather_subsets']) > 1:
+ msg = 'When any of %s is used, no other subset is allowed' % repr(my_subsets)
+ self.module.fail_json(msg=msg)
+ if 'all' in self.parameters['gather_subsets']:
+ self.parameters['gather_subsets'] = self.methods.keys()
+ if 'all_clusters' in self.parameters['gather_subsets']:
+ self.parameters['gather_subsets'] = self.cluster_methods.keys()
+ if 'all_nodes' in self.parameters['gather_subsets']:
+ self.parameters['gather_subsets'] = self.node_methods.keys()
+ for name in self.parameters['gather_subsets']:
+ info[name] = self.get_and_filter_info(name)
+ self.module.exit_json(changed=changed, info=info, debug=self.debug)
+
+
+def main():
+ '''
+ Main function
+ '''
+    na_elementsw_info = ElementSWInfo()
+    na_elementsw_info.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py
new file mode 100644
index 00000000..9bef345b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software manage initiators
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_initiators
+
+short_description: Manage Element SW initiators
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Manage Element Software initiators that allow external clients access to volumes.
+
+options:
+ initiators:
+ description: A list of objects containing characteristics of each initiator.
+ suboptions:
+ name:
+ description: The name of the initiator.
+ type: str
+ required: true
+
+ alias:
+ description: The friendly name assigned to this initiator.
+ type: str
+
+ initiator_id:
+ description: The numeric ID of the initiator.
+ type: int
+
+ volume_access_group_id:
+ description: volumeAccessGroupID to which this initiator belongs.
+ type: int
+
+ attributes:
+ description: A set of JSON attributes to assign to this initiator.
+ type: dict
+ type: list
+ elements: dict
+
+ state:
+ description:
+ - Whether the specified initiator should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+'''
+
+EXAMPLES = """
+
+ - name: Manage initiators
+ tags:
+ - na_elementsw_initiators
+ na_elementsw_initiators:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ initiators:
+ - name: a
+ alias: a1
+ initiator_id: 1
+ volume_access_group_id: 1
+ attributes: {"key": "value"}
+ - name: b
+ alias: b2
+ initiator_id: 2
+ volume_access_group_id: 2
+ state: present
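+
+  # Illustrative example only - delete the same initiators by name.
+  - name: Delete initiators
+    tags:
+      - na_elementsw_initiators
+    na_elementsw_initiators:
+      hostname: "{{ elementsw_hostname }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      initiators:
+      - name: a
+      - name: b
+      state: absent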
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+if HAS_SF_SDK:
+ from solidfire.models import ModifyInitiator
+
+
+class ElementSWInitiators(object):
+ """
+ Element Software Manage Element SW initiators
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+ initiators=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ alias=dict(type='str', default=None),
+ initiator_id=dict(type='int', default=None),
+ volume_access_group_id=dict(type='int', default=None),
+ attributes=dict(type='dict', default=None),
+ )
+ ),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # iterate over each user-provided initiator
+ for initiator in self.parameters.get('initiators'):
+ # add telemetry attributes
+ if 'attributes' in initiator and initiator['attributes']:
+ initiator['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_initiators'))
+ else:
+ initiator['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_initiators')
+
+ def compare_initiators(self, user_initiator, existing_initiator):
+ """
+ compare user input initiator with existing dict
+        :return: True if a modification is required, False if they match
+ """
+ if user_initiator is None or existing_initiator is None:
+ return False
+ changed = False
+ for param in user_initiator:
+ # lookup initiator_name instead of name
+ if param == 'name':
+ if user_initiator['name'] == existing_initiator['initiator_name']:
+ pass
+ elif param == 'initiator_id':
+ # can't change the key
+ pass
+ elif user_initiator[param] == existing_initiator[param]:
+ pass
+ else:
+ self.debug.append('Initiator: %s. Changed: %s from: %s to %s' %
+ (user_initiator['name'], param, str(existing_initiator[param]), str(user_initiator[param])))
+ changed = True
+ return changed
+
+ def initiator_to_dict(self, initiator_obj):
+ """
+ converts initiator class object to dict
+ :return: reconstructed initiator dict
+ """
+ known_params = ['initiator_name',
+ 'alias',
+ 'initiator_id',
+ 'volume_access_groups',
+ 'volume_access_group_id',
+ 'attributes']
+ initiator_dict = {}
+
+        # a missing parameter would cause an error,
+        # so assign None as a default via getattr
+ for param in known_params:
+ initiator_dict[param] = getattr(initiator_obj, param, None)
+ if initiator_dict['volume_access_groups'] is not None:
+ if len(initiator_dict['volume_access_groups']) == 1:
+ initiator_dict['volume_access_group_id'] = initiator_dict['volume_access_groups'][0]
+ elif len(initiator_dict['volume_access_groups']) > 1:
+ self.module.fail_json(msg="Only 1 access group is supported, found: %s" % repr(initiator_obj))
+ del initiator_dict['volume_access_groups']
+ return initiator_dict
+
+ def find_initiator(self, id=None, name=None):
+ """
+ find a specific initiator
+ :return: initiator dict
+ """
+ initiator_details = None
+ if self.all_existing_initiators is None:
+ return initiator_details
+ for initiator in self.all_existing_initiators:
+ # if name is provided or
+ # if id is provided
+ if name is not None:
+ if initiator.initiator_name == name:
+ initiator_details = self.initiator_to_dict(initiator)
+ elif id is not None:
+ if initiator.initiator_id == id:
+ initiator_details = self.initiator_to_dict(initiator)
+ else:
+ # if neither id nor name provided
+ # return everything
+ initiator_details = self.all_existing_initiators
+ return initiator_details
+
+ @staticmethod
+ def rename_key(obj, old_name, new_name):
+ obj[new_name] = obj.pop(old_name)
+
+ def create_initiator(self, initiator):
+ """
+ create initiator
+ """
+ # SF SDK is using camelCase for this one
+ self.rename_key(initiator, 'volume_access_group_id', 'volumeAccessGroupID')
+ # create_initiators needs an array
+ initiator_list = [initiator]
+ try:
+ self.sfe.create_initiators(initiator_list)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error creating initiator %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def delete_initiator(self, initiator):
+ """
+ delete initiator
+ """
+ # delete_initiators needs an array
+ initiator_id_array = [initiator]
+ try:
+ self.sfe.delete_initiators(initiator_id_array)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error deleting initiator %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def modify_initiator(self, initiator, existing_initiator):
+ """
+ modify initiator
+ """
+ # create the new initiator dict
+ # by merging old and new values
+ merged_initiator = existing_initiator.copy()
+ # can't change the key
+ del initiator['initiator_id']
+ merged_initiator.update(initiator)
+
+ # we MUST create an object before sending
+ # the new initiator to modify_initiator
+ initiator_object = ModifyInitiator(initiator_id=merged_initiator['initiator_id'],
+ alias=merged_initiator['alias'],
+ volume_access_group_id=merged_initiator['volume_access_group_id'],
+ attributes=merged_initiator['attributes'])
+ initiator_list = [initiator_object]
+ try:
+ self.sfe.modify_initiators(initiators=initiator_list)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error modifying initiator: %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ configure initiators
+ """
+ changed = False
+ result_message = None
+
+ # get all user provided initiators
+ input_initiators = self.parameters.get('initiators')
+
+ # get all initiators
+ # store in a cache variable
+ self.all_existing_initiators = self.sfe.list_initiators().initiators
+
+ # iterate over each user-provided initiator
+ for in_initiator in input_initiators:
+ if self.parameters.get('state') == 'present':
+ # check if initiator_id is provided and exists
+ if 'initiator_id' in in_initiator and in_initiator['initiator_id'] is not None and \
+ self.find_initiator(id=in_initiator['initiator_id']) is not None:
+ if self.compare_initiators(in_initiator, self.find_initiator(id=in_initiator['initiator_id'])):
+ changed = True
+ result_message = 'modifying initiator(s)'
+ self.modify_initiator(in_initiator, self.find_initiator(id=in_initiator['initiator_id']))
+ # otherwise check if name is provided and exists
+ elif 'name' in in_initiator and in_initiator['name'] is not None and self.find_initiator(name=in_initiator['name']) is not None:
+ if self.compare_initiators(in_initiator, self.find_initiator(name=in_initiator['name'])):
+ changed = True
+ result_message = 'modifying initiator(s)'
+ self.modify_initiator(in_initiator, self.find_initiator(name=in_initiator['name']))
+ # this is a create op if initiator doesn't exist
+ else:
+ changed = True
+ result_message = 'creating initiator(s)'
+ self.create_initiator(in_initiator)
+ elif self.parameters.get('state') == 'absent':
+ # delete_initiators only processes ids
+ # so pass ids of initiators to method
+ if 'name' in in_initiator and in_initiator['name'] is not None and \
+ self.find_initiator(name=in_initiator['name']) is not None:
+ changed = True
+ result_message = 'deleting initiator(s)'
+ self.delete_initiator(self.find_initiator(name=in_initiator['name'])['initiator_id'])
+ elif 'initiator_id' in in_initiator and in_initiator['initiator_id'] is not None and \
+ self.find_initiator(id=in_initiator['initiator_id']) is not None:
+ changed = True
+ result_message = 'deleting initiator(s)'
+ self.delete_initiator(in_initiator['initiator_id'])
+ if self.module.check_mode is True:
+ result_message = "Check mode, skipping changes"
+ if self.debug:
+ result_message += ". %s" % self.debug
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_initiators = ElementSWInitiators()
+ na_elementsw_initiators.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py
new file mode 100644
index 00000000..a71ddf56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_ldap
+
+short_description: NetApp Element Software Manage ldap admin users
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Enable or disable LDAP authentication, and add LDAP users.
+
+options:
+
+ state:
+ description:
+    - Whether LDAP authentication should be enabled (present) or disabled (absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ authType:
+ description:
+ - Identifies which user authentication method to use.
+ choices: ['DirectBind', 'SearchAndBind']
+ type: str
+
+ groupSearchBaseDn:
+ description:
+    - The base DN of the tree to start the group search (will do a subtree search from here).
+ type: str
+
+ groupSearchType:
+ description:
+    - Controls the default group search filter used.
+ choices: ['NoGroup', 'ActiveDirectory', 'MemberDN']
+ type: str
+
+ serverURIs:
+ description:
+    - A comma-separated list of LDAP server URIs.
+ type: str
+
+ userSearchBaseDN:
+ description:
+    - The base DN of the tree to start the search (will do a subtree search from here).
+ type: str
+
+ searchBindDN:
+ description:
+    - A fully qualified DN to log in with to perform an LDAP search for the user (needs read access to the LDAP directory).
+ type: str
+
+ searchBindPassword:
+ description:
+    - The password for the searchBindDN account used for searching.
+ type: str
+
+ userSearchFilter:
+ description:
+    - The LDAP filter to use.
+ type: str
+
+ userDNTemplate:
+ description:
+    - A string that is used to form a fully qualified user DN.
+ type: str
+
+ groupSearchCustomFilter:
+ description:
+    - For use with the CustomFilter search type.
+ type: str
+'''
+
+EXAMPLES = """
+ - name: disable ldap authentication
+ na_elementsw_ldap:
+ state: absent
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ hostname: "{{ hostname }}"
+
+ - name: Enable ldap authentication
+ na_elementsw_ldap:
+ state: present
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ hostname: "{{ hostname }}"
+ authType: DirectBind
+ serverURIs: ldap://svmdurlabesx01spd_ldapclnt
+ groupSearchType: MemberDN
+ userDNTemplate: uid=%USERNAME%,cn=users,cn=accounts,dc=corp,dc="{{ company name }}",dc=com
+
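+  # Illustrative example only - SearchAndBind authentication; the server URI, DNs
+  # and search filter below are placeholders.
+  - name: Enable ldap authentication with SearchAndBind
+    na_elementsw_ldap:
+      state: present
+      username: "{{ admin username }}"
+      password: "{{ admin password }}"
+      hostname: "{{ hostname }}"
+      authType: SearchAndBind
+      serverURIs: ldap://ldap.example.com
+      groupSearchType: ActiveDirectory
+      userSearchBaseDN: cn=users,cn=accounts,dc=example,dc=com
+      searchBindDN: uid=admin,cn=users,cn=accounts,dc=example,dc=com
+      searchBindPassword: "{{ bind password }}"
+      userSearchFilter: "(&(uid=%USERNAME%))"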
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except Exception:
+ HAS_SF_SDK = False
+
+
+class NetappElementLdap(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ authType=dict(type='str', choices=['DirectBind', 'SearchAndBind']),
+ groupSearchBaseDn=dict(type='str'),
+ groupSearchType=dict(type='str', choices=['NoGroup', 'ActiveDirectory', 'MemberDN']),
+ serverURIs=dict(type='str'),
+ userSearchBaseDN=dict(type='str'),
+ searchBindDN=dict(type='str'),
+ searchBindPassword=dict(type='str', no_log=True),
+ userSearchFilter=dict(type='str'),
+ userDNTemplate=dict(type='str'),
+ groupSearchCustomFilter=dict(type='str'),
+ )
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ param = self.module.params
+
+ # set up state variables
+ self.state = param['state']
+ self.authType = param['authType']
+ self.groupSearchBaseDn = param['groupSearchBaseDn']
+ self.groupSearchType = param['groupSearchType']
+ self.serverURIs = param['serverURIs']
+ if self.serverURIs is not None:
+ self.serverURIs = self.serverURIs.split(',')
+ self.userSearchBaseDN = param['userSearchBaseDN']
+ self.searchBindDN = param['searchBindDN']
+ self.searchBindPassword = param['searchBindPassword']
+ self.userSearchFilter = param['userSearchFilter']
+ self.userDNTemplate = param['userDNTemplate']
+ self.groupSearchCustomFilter = param['groupSearchCustomFilter']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_ldap_configuration(self):
+ """
+ Return ldap configuration if found
+
+ :return: Details about the ldap configuration. None if not found.
+ :rtype: solidfire.models.GetLdapConfigurationResult
+ """
+ ldap_config = self.sfe.get_ldap_configuration()
+ return ldap_config
+
+ def enable_ldap(self):
+ """
+ Enable LDAP
+ :return: nothing
+ """
+ try:
+ self.sfe.enable_ldap_authentication(self.serverURIs, auth_type=self.authType,
+ group_search_base_dn=self.groupSearchBaseDn,
+ group_search_type=self.groupSearchType,
+ group_search_custom_filter=self.groupSearchCustomFilter,
+ search_bind_dn=self.searchBindDN,
+ search_bind_password=self.searchBindPassword,
+ user_search_base_dn=self.userSearchBaseDN,
+ user_search_filter=self.userSearchFilter,
+ user_dntemplate=self.userDNTemplate)
+ except solidfire.common.ApiServerError as error:
+ self.module.fail_json(msg='Error enabling LDAP: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def check_config(self, ldap_config):
+ """
+ Check to see if the ldap config has been modified.
+ :param ldap_config: The LDAP configuration
+ :return: False if the config is the same as the playbook, True if it is not
+ """
+ if self.authType != ldap_config.ldap_configuration.auth_type:
+ return True
+ if self.serverURIs != ldap_config.ldap_configuration.server_uris:
+ return True
+ if self.groupSearchBaseDn != ldap_config.ldap_configuration.group_search_base_dn:
+ return True
+ if self.groupSearchType != ldap_config.ldap_configuration.group_search_type:
+ return True
+ if self.groupSearchCustomFilter != ldap_config.ldap_configuration.group_search_custom_filter:
+ return True
+ if self.searchBindDN != ldap_config.ldap_configuration.search_bind_dn:
+ return True
+ if self.searchBindPassword != ldap_config.ldap_configuration.search_bind_password:
+ return True
+ if self.userSearchBaseDN != ldap_config.ldap_configuration.user_search_base_dn:
+ return True
+ if self.userSearchFilter != ldap_config.ldap_configuration.user_search_filter:
+ return True
+ if self.userDNTemplate != ldap_config.ldap_configuration.user_dntemplate:
+ return True
+ return False
+
+ def apply(self):
+ changed = False
+ ldap_config = self.get_ldap_configuration()
+ if self.state == 'absent':
+ if ldap_config and ldap_config.ldap_configuration.enabled:
+ changed = True
+ if self.state == 'present' and self.check_config(ldap_config):
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ self.enable_ldap()
+ elif self.state == 'absent':
+ self.sfe.disable_ldap_authentication()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetappElementLdap()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py
new file mode 100644
index 00000000..a9151a62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Node Network Interfaces - Bond 1G and 10G configuration
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_network_interfaces
+
+short_description: NetApp Element Software Configure Node Network Interfaces
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Configure Element SW Node Network Interfaces for Bond 1G and 10G IP addresses.
+  - This module does not create interfaces; it expects the interfaces to already exist and can only modify them.
+ - This module cannot set or modify the method (Loopback, manual, dhcp, static).
+ - This module is not idempotent and does not support check_mode.
+
+options:
+ method:
+ description:
+    - deprecated, this option would trigger an 'updated failed' error.
+ type: str
+
+ ip_address_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ ip_address_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ subnet_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ subnet_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ gateway_address_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ gateway_address_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ mtu_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ mtu_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ dns_nameservers:
+ description:
+ - deprecated, use bond_1g and bond_10g options.
+ type: list
+ elements: str
+
+ dns_search_domains:
+ description:
+ - deprecated, use bond_1g and bond_10g options.
+ type: list
+ elements: str
+
+ bond_mode_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ bond_mode_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ lacp_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ lacp_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ virtual_network_tag:
+ description:
+ - deprecated, use bond_1g and bond_10g options.
+ type: str
+
+ bond_1g:
+ description:
+ - settings for the Bond1G interface.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IP address for the interface.
+ type: str
+ netmask:
+ description:
+ - subnet mask for the interface.
+ type: str
+ gateway:
+ description:
+ - IP router network address to send packets out of the local network.
+ type: str
+ mtu:
+ description:
+          - The largest packet size (in bytes) that the interface can transmit.
+ - Must be greater than or equal to 1500 bytes.
+ type: str
+ dns_nameservers:
+ description:
+ - List of addresses for domain name servers.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of DNS search domains.
+ type: list
+ elements: str
+ bond_mode:
+ description:
+ - Bonding mode.
+ choices: ['ActivePassive', 'ALB', 'LACP']
+ type: str
+ bond_lacp_rate:
+ description:
+ - Link Aggregation Control Protocol - useful only if LACP is selected as the Bond Mode.
+ - Slow - Packets are transmitted at 30 second intervals.
+ - Fast - Packets are transmitted in 1 second intervals.
+ choices: ['Fast', 'Slow']
+ type: str
+ virtual_network_tag:
+ description:
+ - The virtual network identifier of the interface (VLAN tag).
+ type: str
+
+ bond_10g:
+ description:
+ - settings for the Bond10G interface.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IP address for the interface.
+ type: str
+ netmask:
+ description:
+ - subnet mask for the interface.
+ type: str
+ gateway:
+ description:
+ - IP router network address to send packets out of the local network.
+ type: str
+ mtu:
+ description:
+          - The largest packet size (in bytes) that the interface can transmit.
+ - Must be greater than or equal to 1500 bytes.
+ type: str
+ dns_nameservers:
+ description:
+ - List of addresses for domain name servers.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of DNS search domains.
+ type: list
+ elements: str
+ bond_mode:
+ description:
+ - Bonding mode.
+ choices: ['ActivePassive', 'ALB', 'LACP']
+ type: str
+ bond_lacp_rate:
+ description:
+ - Link Aggregation Control Protocol - useful only if LACP is selected as the Bond Mode.
+ - Slow - Packets are transmitted at 30 second intervals.
+ - Fast - Packets are transmitted in 1 second intervals.
+ choices: ['Fast', 'Slow']
+ type: str
+ virtual_network_tag:
+ description:
+ - The virtual network identifier of the interface (VLAN tag).
+ type: str
+
+'''
+
+EXAMPLES = """
+
+ - name: Set Node network interfaces configuration for Bond 1G and 10G properties
+ tags:
+ - elementsw_network_interfaces
+ na_elementsw_network_interfaces:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ bond_1g:
+ address: 10.253.168.131
+ netmask: 255.255.248.0
+ gateway: 10.253.168.1
+ mtu: '1500'
+ bond_mode: ActivePassive
+ dns_nameservers: dns1,dns2
+ dns_search: domain1,domain2
+ bond_10g:
+ address: 10.253.1.202
+ netmask: 255.255.255.192
+ gateway: 10.253.1.193
+ mtu: '9000'
+ bond_mode: LACP
+ bond_lacp_rate: Fast
+ virtual_network_tag: vnet_tag
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+try:
+ from solidfire.models import Network, NetworkConfig
+ from solidfire.common import ApiConnectionError as sf_ApiConnectionError, ApiServerError as sf_ApiServerError
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWNetworkInterfaces(object):
+ """
+ Element Software Network Interfaces - Bond 1G and 10G Network configuration
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ method=dict(required=False, type='str'),
+ ip_address_1g=dict(required=False, type='str'),
+ ip_address_10g=dict(required=False, type='str'),
+ subnet_1g=dict(required=False, type='str'),
+ subnet_10g=dict(required=False, type='str'),
+ gateway_address_1g=dict(required=False, type='str'),
+ gateway_address_10g=dict(required=False, type='str'),
+ mtu_1g=dict(required=False, type='str'),
+ mtu_10g=dict(required=False, type='str'),
+ dns_nameservers=dict(required=False, type='list', elements='str'),
+ dns_search_domains=dict(required=False, type='list', elements='str'),
+ bond_mode_1g=dict(required=False, type='str'),
+ bond_mode_10g=dict(required=False, type='str'),
+ lacp_1g=dict(required=False, type='str'),
+ lacp_10g=dict(required=False, type='str'),
+ virtual_network_tag=dict(required=False, type='str'),
+ bond_1g=dict(required=False, type='dict', options=dict(
+ address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ mtu=dict(required=False, type='str'),
+ dns_nameservers=dict(required=False, type='list', elements='str'),
+ dns_search=dict(required=False, type='list', elements='str'),
+ bond_mode=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP']),
+ bond_lacp_rate=dict(required=False, type='str', choices=['Fast', 'Slow']),
+ virtual_network_tag=dict(required=False, type='str'),
+ )),
+ bond_10g=dict(required=False, type='dict', options=dict(
+ address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ mtu=dict(required=False, type='str'),
+ dns_nameservers=dict(required=False, type='list', elements='str'),
+ dns_search=dict(required=False, type='list', elements='str'),
+ bond_mode=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP']),
+ bond_lacp_rate=dict(required=False, type='str', choices=['Fast', 'Slow']),
+ virtual_network_tag=dict(required=False, type='str'),
+ )),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ input_params = self.module.params
+ self.fail_when_deprecated_options_are_set(input_params)
+
+ self.bond1g = input_params['bond_1g']
+ self.bond10g = input_params['bond_10g']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ # increase time out, as it may take 30 seconds when making a change
+ self.sfe = netapp_utils.create_sf_connection(module=self.module, port=442, timeout=90)
+
+ def fail_when_deprecated_options_are_set(self, input_params):
+        ''' report an error and exit if any deprecated option is set '''
+
+ dparms_1g = [x for x in ('ip_address_1g', 'subnet_1g', 'gateway_address_1g', 'mtu_1g', 'bond_mode_1g', 'lacp_1g')
+ if input_params[x] is not None]
+ dparms_10g = [x for x in ('ip_address_10g', 'subnet_10g', 'gateway_address_10g', 'mtu_10g', 'bond_mode_10g', 'lacp_10g')
+ if input_params[x] is not None]
+ dparms_common = [x for x in ('dns_nameservers', 'dns_search_domains', 'virtual_network_tag')
+ if input_params[x] is not None]
+
+ error_msg = ''
+ if dparms_1g and dparms_10g:
+ error_msg = 'Please use the new bond_1g and bond_10g options to configure the bond interfaces.'
+ elif dparms_1g:
+ error_msg = 'Please use the new bond_1g option to configure the bond 1G interface.'
+ elif dparms_10g:
+ error_msg = 'Please use the new bond_10g option to configure the bond 10G interface.'
+ elif dparms_common:
+ error_msg = 'Please use the new bond_1g or bond_10g options to configure the bond interfaces.'
+ if input_params['method']:
+ error_msg = 'This module cannot set or change "method". ' + error_msg
+ dparms_common.append('method')
+ if error_msg:
+ error_msg += ' The following parameters are deprecated and cannot be used: '
+ dparms = dparms_1g
+ dparms.extend(dparms_10g)
+ dparms.extend(dparms_common)
+ error_msg += ', '.join(dparms)
+ self.module.fail_json(msg=error_msg)
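+        # Worked example of the check above (hypothetical input): if a playbook sets only
+        # the deprecated option ip_address_1g, dparms_1g is ['ip_address_1g'] and the module
+        # fails with:
+        #   'Please use the new bond_1g option to configure the bond 1G interface.
+        #    The following parameters are deprecated and cannot be used: ip_address_1g'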
+
+ def set_network_config(self, network_object):
+ """
+ set network configuration
+ """
+ try:
+ self.sfe.set_network_config(network=network_object)
+ except (sf_ApiConnectionError, sf_ApiServerError) as exception_object:
+            self.module.fail_json(msg='Error setting network config: %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def set_network_config_object(self, network_params):
+ ''' set SolidFire network config object '''
+ network_config = dict()
+ if network_params is not None:
+ for key in network_params:
+ if network_params[key] is not None:
+ network_config[key] = network_params[key]
+ if network_config:
+ return NetworkConfig(**network_config)
+ return None
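+        # Illustration (hypothetical values): with
+        #   bond_1g={'address': '10.0.0.5', 'mtu': '9000', 'bond_mode': 'LACP', 'netmask': None}
+        # the loop above drops the None entry and returns
+        #   NetworkConfig(address='10.0.0.5', mtu='9000', bond_mode='LACP')
+        # i.e. the suboption names are passed straight through as NetworkConfig keyword
+        # arguments, which the SolidFire SDK model is assumed to accept as-is.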
+
+ def set_network_object(self):
+ """
+ Set Element SW Network object
+ :description: set Network object
+
+ :return: Network object
+ :rtype: object(Network object)
+ """
+ bond_1g_network = self.set_network_config_object(self.bond1g)
+ bond_10g_network = self.set_network_config_object(self.bond10g)
+ network_object = None
+ if bond_1g_network is not None or bond_10g_network is not None:
+ network_object = Network(bond1_g=bond_1g_network,
+ bond10_g=bond_10g_network)
+ return network_object
+
+ def apply(self):
+ """
+ Check connection and initialize node with cluster ownership
+ """
+ changed = False
+ result_message = None
+ network_object = self.set_network_object()
+ if network_object is not None:
+ if not self.module.check_mode:
+ self.set_network_config(network_object)
+ changed = True
+ else:
+ result_message = "Skipping changes, No change requested"
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ elementsw_network_interfaces = ElementSWNetworkInterfaces()
+ elementsw_network_interfaces.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py
new file mode 100644
index 00000000..d1412f2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Node Operation
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_node
+
+short_description: NetApp Element Software Node Operation
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+  - Add or remove a cluster node on an Element Software cluster.
+ - Set cluster name on node.
+ - When using the preset_only option, hostname/username/password are required but not used.
+
+options:
+ state:
+ description:
+ - Element Software Storage Node operation state.
+ - present - To add pending node to participate in cluster data storage.
+ - absent - To remove node from active cluster. A node cannot be removed if active drives are present.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ node_ids:
+ description:
+    - List of IDs, names, or IP addresses of nodes to add or remove.
+ - If cluster_name is set, node MIPs are required.
+ type: list
+ elements: str
+ required: true
+ aliases: ['node_id']
+
+ cluster_name:
+ description:
+ - If set, the current node configuration is updated with this name before adding the node to the cluster.
+    - This requires the node_ids to be specified as MIPs (Management IP Addresses).
+ type: str
+ version_added: 20.9.0
+
+ preset_only:
+ description:
+ - If true and state is 'present', set the cluster name for each node in node_ids, but do not add the nodes.
+ - They can be added using na_elementsw_cluster for initial cluster creation.
+ - If false, proceed with addition/removal.
+ type: bool
+ default: false
+ version_added: 20.9.0
+'''
+
+EXAMPLES = """
+ - name: Add node from pending to active cluster
+ tags:
+ - elementsw_add_node
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ node_id: sf4805-meg-03
+
+ - name: Remove active node from cluster
+ tags:
+ - elementsw_remove_node
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ node_id: 13
+
+ - name: Add node from pending to active cluster using node IP
+ tags:
+ - elementsw_add_node_ip
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ node_id: 10.109.48.65
+ cluster_name: sfcluster01
+
+ - name: Only set cluster name
+ tags:
+ - elementsw_add_node_ip
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ node_ids: 10.109.48.65,10.109.48.66
+ cluster_name: sfcluster01
+ preset_only: true
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWNode(object):
+ """
+ Element SW Storage Node operations
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ node_ids=dict(required=True, type='list', elements='str', aliases=['node_id']),
+ cluster_name=dict(required=False, type='str'),
+ preset_only=dict(required=False, type='bool', default=False),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.state = input_params['state']
+ self.node_ids = input_params['node_ids']
+ self.cluster_name = input_params['cluster_name']
+ self.preset_only = input_params['preset_only']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ elif not self.preset_only:
+ # Cluster connection is only needed for add/delete operations
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def check_node_has_active_drives(self, node_id=None):
+ """
+ Check if node has active drives attached to cluster
+        :description: Validate whether the node has active drives in the cluster
+
+ :return: True or False
+ :rtype: bool
+ """
+ if node_id is not None:
+ cluster_drives = self.sfe.list_drives()
+ for drive in cluster_drives.drives:
+ if drive.node_id == node_id and drive.status == "active":
+ return True
+ return False
+
+ @staticmethod
+ def extract_node_info(node_list):
+ summary = list()
+ for node in node_list:
+ node_dict = dict()
+ for key, value in vars(node).items():
+ if key in ['assigned_node_id', 'cip', 'mip', 'name', 'node_id', 'pending_node_id', 'sip']:
+ node_dict[key] = value
+ summary.append(node_dict)
+ return summary
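+        # Illustration (values are made up): for two nodes the summary could look like
+        #   [{'node_id': 1, 'name': 'node-1', 'mip': '10.1.1.11', 'cip': '10.2.1.11', 'sip': '10.3.1.11'},
+        #    {'node_id': 2, 'name': 'node-2', 'mip': '10.1.1.12', ...}]
+        # only the keys whitelisted above are copied from each node object.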
+
+ def get_node_list(self):
+ """
+ Get Node List
+ :description: Find and retrieve node_ids from the active cluster
+
+ :return: None
+ :rtype: None
+ """
+ action_nodes_list = list()
+ if len(self.node_ids) > 0:
+ unprocessed_node_list = list(self.node_ids)
+ list_nodes = []
+ try:
+ all_nodes = self.sfe.list_all_nodes()
+ except netapp_utils.solidfire.common.ApiServerError as exception_object:
+ self.module.fail_json(msg='Error getting list of nodes from cluster: %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+            # For an add operation, look up nodes in the pending nodes list;
+            # otherwise, traverse the nodes of the active cluster.
+ if self.state == "present":
+ list_nodes = all_nodes.pending_nodes
+ else:
+ list_nodes = all_nodes.nodes
+
+ for current_node in list_nodes:
+ if self.state == "absent" and \
+ (str(current_node.node_id) in self.node_ids or current_node.name in self.node_ids or current_node.mip in self.node_ids):
+ if self.check_node_has_active_drives(current_node.node_id):
+ self.module.fail_json(msg='Error deleting node %s: node has active drives' % current_node.name)
+ else:
+ action_nodes_list.append(current_node.node_id)
+ if self.state == "present" and \
+ (str(current_node.pending_node_id) in self.node_ids or current_node.name in self.node_ids or current_node.mip in self.node_ids):
+ action_nodes_list.append(current_node.pending_node_id)
+
+ # report an error if state == present and node is unknown
+ if self.state == "present":
+ for current_node in all_nodes.nodes:
+ if str(current_node.node_id) in unprocessed_node_list:
+ unprocessed_node_list.remove(str(current_node.node_id))
+ elif current_node.name in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.name)
+ elif current_node.mip in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.mip)
+ for current_node in all_nodes.pending_nodes:
+ if str(current_node.pending_node_id) in unprocessed_node_list:
+ unprocessed_node_list.remove(str(current_node.pending_node_id))
+ elif current_node.name in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.name)
+ elif current_node.mip in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.mip)
+ if len(unprocessed_node_list) > 0:
+ summary = dict(
+ nodes=self.extract_node_info(all_nodes.nodes),
+ pending_nodes=self.extract_node_info(all_nodes.pending_nodes),
+ pending_active_nodes=self.extract_node_info(all_nodes.pending_active_nodes)
+ )
+ self.module.fail_json(msg='Error adding nodes %s: nodes not in pending or active lists: %s' %
+ (to_native(unprocessed_node_list), repr(summary)))
+ return action_nodes_list
+
+ def add_node(self, nodes_list=None):
+ """
+        Add nodes that are on the cluster's pending nodes list
+ """
+ try:
+ self.sfe.add_nodes(nodes_list, auto_install=True)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error adding nodes %s to cluster: %s' % (nodes_list, to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def remove_node(self, nodes_list=None):
+ """
+ Remove active node from Cluster
+ """
+ try:
+ self.sfe.remove_nodes(nodes_list)
+ except Exception as exception_object:
+            self.module.fail_json(msg='Error removing nodes %s from cluster: %s' % (nodes_list, to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def set_cluster_name(self, node):
+ ''' set up cluster name for the node using its MIP '''
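+        # This connects to the node itself (hostname=node, port 442) rather than to the
+        # cluster, because the cluster name has to be set on a node before it joins a
+        # cluster.  As implemented below: return False when the node already reports this
+        # cluster name, fail when the node is already 'Active', and return True when a
+        # change is made (or would be made in check mode).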
+ cluster = dict(cluster=self.cluster_name)
+ port = 442
+ try:
+ node_cx = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, hostname=node, port=port)
+ except netapp_utils.solidfire.common.ApiConnectionError as exc:
+ if str(exc) == "Bad Credentials":
+ msg = 'Most likely the node %s is already in a cluster.' % node
+ msg += ' Make sure to use valid node credentials for username and password.'
+ msg += ' Node reported: %s' % repr(exc)
+ else:
+ msg = 'Failed to create connection: %s' % repr(exc)
+ self.module.fail_json(msg=msg)
+ except Exception as exc:
+ self.module.fail_json(msg='Failed to connect to %s:%d - %s' % (node, port, to_native(exc)),
+ exception=traceback.format_exc())
+
+ try:
+ cluster_config = node_cx.get_cluster_config()
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ self.module.fail_json(msg='Error getting cluster config: %s' % to_native(exc),
+ exception=traceback.format_exc())
+
+ if cluster_config.cluster.cluster == self.cluster_name:
+ return False
+ if cluster_config.cluster.state == 'Active':
+ self.module.fail_json(msg="Error updating cluster name for node %s, already in 'Active' state"
+ % node, cluster_config=repr(cluster_config))
+ if self.module.check_mode:
+ return True
+
+ try:
+ node_cx.set_cluster_config(cluster)
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ self.module.fail_json(msg='Error updating cluster name: %s' % to_native(exc),
+ cluster_config=repr(cluster_config),
+ exception=traceback.format_exc())
+ return True
+
+ def apply(self):
+ """
+ Check, process and initiate Cluster Node operation
+ """
+ changed = False
+ updated_nodes = list()
+ result_message = ''
+ if self.state == "present" and self.cluster_name is not None:
+ for node in self.node_ids:
+ if self.set_cluster_name(node):
+ changed = True
+ updated_nodes.append(node)
+ if not self.preset_only:
+ # let's see if there is anything to add or remove
+ action_nodes_list = self.get_node_list()
+ action = None
+ if self.state == "present" and len(action_nodes_list) > 0:
+ changed = True
+ action = 'added'
+ if not self.module.check_mode:
+ self.add_node(action_nodes_list)
+ elif self.state == "absent" and len(action_nodes_list) > 0:
+ changed = True
+ action = 'removed'
+ if not self.module.check_mode:
+ self.remove_node(action_nodes_list)
+ if action:
+ result_message = 'List of %s nodes: %s - requested: %s' % (action, to_native(action_nodes_list), to_native(self.node_ids))
+ if updated_nodes:
+ result_message += '\n' if result_message else ''
+ result_message += 'List of updated nodes with %s: %s' % (self.cluster_name, updated_nodes)
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+
+ na_elementsw_node = ElementSWNode()
+ na_elementsw_node.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py
new file mode 100644
index 00000000..a7defb0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software QOS Policy
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_qos_policy
+
+short_description: NetApp Element Software create/modify/rename/delete QOS Policy
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 20.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, modify, rename, or delete QOS policy on Element Software Cluster.
+
+options:
+
+ state:
+ description:
+ - Whether the specified QOS policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - Name or id for the QOS policy.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name or id for the QOS policy to be renamed.
+ type: str
+
+ qos:
+ description:
+    - The quality of service (QOS) settings for the policy.
+    - Required for create.
+    - Supported keys are minIOPS, maxIOPS, burstIOPS.
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: Add QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: gold
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+
+ - name: Modify QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+      state: present
+ name: gold
+ qos: {minIOPS: 100, maxIOPS: 5000, burstIOPS: 20000}
+
+ - name: Rename QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+      state: present
+ from_name: gold
+ name: silver
+
+ - name: Remove QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: silver
+"""
+
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWQosPolicy(object):
+ """
+ Element Software QOS Policy
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ qos=dict(required=False, type='dict'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # Set up state variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.qos_policy_id = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_qos_policy')
+
+ def get_qos_policy(self, name):
+ """
+ Get QOS Policy
+ """
+ policy, error = self.elementsw_helper.get_qos_policy(name)
+ if error is not None:
+ self.module.fail_json(msg=error, exception=traceback.format_exc())
+ return policy
+
+ def create_qos_policy(self, name, qos):
+ """
+ Create the QOS Policy
+ """
+ try:
+ self.sfe.create_qos_policy(name=name, qos=qos)
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ self.module.fail_json(msg="Error creating qos policy: %s: %s" %
+ (name, to_native(exc)), exception=traceback.format_exc())
+
+ def update_qos_policy(self, qos_policy_id, modify, name=None):
+ """
+ Update the QOS Policy if the policy already exists
+ """
+ options = dict(
+ qos_policy_id=qos_policy_id
+ )
+ if name is not None:
+ options['name'] = name
+ if 'qos' in modify:
+ options['qos'] = modify['qos']
+
+ try:
+ self.sfe.modify_qos_policy(**options)
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ self.module.fail_json(msg="Error updating qos policy: %s: %s" %
+ (self.parameters['from_name'] if name is not None else self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_qos_policy(self, qos_policy_id):
+ """
+ Delete the QOS Policy
+ """
+ try:
+ self.sfe.delete_qos_policy(qos_policy_id=qos_policy_id)
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ self.module.fail_json(msg="Error deleting qos policy: %s: %s" %
+ (self.parameters['name'], to_native(exc)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the create/delete/rename/modify actions for qos policy on the Element Software Cluster
+ """
+ modify = dict()
+ current = self.get_qos_policy(self.parameters['name'])
+ qos_policy_id = None if current is None else current['qos_policy_id']
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name') is not None:
+ from_qos_policy = self.get_qos_policy(self.parameters['from_name'])
+ if from_qos_policy is None:
+ self.module.fail_json(msg="Error renaming qos policy, no existing policy with name/id: %s" % self.parameters['from_name'])
+ cd_action = 'rename'
+ qos_policy_id = from_qos_policy['qos_policy_id']
+ self.na_helper.changed = True
+ modify = self.na_helper.get_modified_attributes(from_qos_policy, self.parameters)
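+            # Walkthrough of the rename path above (hypothetical values): with
+            # from_name=gold and name=silver, if 'silver' does not exist but 'gold' does,
+            # cd_action switches from 'create' to 'rename', the existing policy's
+            # qos_policy_id is reused, and get_modified_attributes() picks up the name
+            # change plus any difference in the qos settings.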
+ if cd_action == 'create' and 'qos' not in self.parameters:
+ self.module.fail_json(msg="Error creating qos policy: %s, 'qos:' option is required" % self.parameters['name'])
+
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_qos_policy(self.parameters['name'], self.parameters['qos'])
+ elif cd_action == 'delete':
+ self.delete_qos_policy(qos_policy_id)
+ elif cd_action == 'rename':
+ self.update_qos_policy(qos_policy_id, modify, name=self.parameters['name'])
+ elif modify:
+ self.update_qos_policy(qos_policy_id, modify)
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_qos_policy = ElementSWQosPolicy()
+ na_elementsw_qos_policy.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py
new file mode 100644
index 00000000..23144e42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element OS Software Snapshot Manager
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_snapshot
+
+short_description: NetApp Element Software Manage Snapshots
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+  - Create, modify, or delete snapshots on an Element OS cluster.
+
+options:
+ name:
+ description:
+    - Name of the new snapshot to create.
+    - If unspecified, the date and time when the snapshot was taken is used.
+ type: str
+
+ state:
+ description:
+ - Whether the specified snapshot should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ src_volume_id:
+ description:
+ - ID or Name of active volume.
+ required: true
+ type: str
+
+ account_id:
+ description:
+ - Account ID or Name of Parent/Source Volume.
+ required: true
+ type: str
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ type: str
+
+ src_snapshot_id:
+ description:
+ - ID or Name of an existing snapshot.
+ - Required when C(state=present), to modify snapshot properties.
+ - Required when C(state=present), to create snapshot from another snapshot in the volume.
+ - Required when C(state=absent), to delete snapshot.
+ type: str
+
+ enable_remote_replication:
+ description:
+    - Whether to replicate the created snapshot to a remote replication cluster.
+    - Set to true to enable remote replication.
+ type: bool
+
+ snap_mirror_label:
+ description:
+    - Label used by SnapMirror software to specify the snapshot retention policy on the SnapMirror endpoint.
+ type: str
+
+ expiration_time:
+ description:
+ - The date and time (format ISO 8601 date string) at which this snapshot will expire.
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create snapshot
+ tags:
+ - elementsw_create_snapshot
+ na_elementsw_snapshot:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ src_volume_id: 118
+ account_id: sagarsh
+ name: newsnapshot-1
+
+ - name: Modify Snapshot
+ tags:
+ - elementsw_modify_snapshot
+ na_elementsw_snapshot:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ src_volume_id: sagarshansivolume
+ src_snapshot_id: test1
+ account_id: sagarsh
+ expiration_time: '2018-06-16T12:24:56Z'
+ enable_remote_replication: false
+
+ - name: Delete Snapshot
+ tags:
+ - elementsw_delete_snapshot
+ na_elementsw_snapshot:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ src_snapshot_id: deltest1
+ account_id: sagarsh
+ src_volume_id: sagarshansivolume
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementOSSnapshot(object):
+ """
+ Element OS Snapshot Manager
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ account_id=dict(required=True, type='str'),
+ name=dict(required=False, type='str'),
+ src_volume_id=dict(required=True, type='str'),
+ retention=dict(required=False, type='str'),
+ src_snapshot_id=dict(required=False, type='str'),
+ enable_remote_replication=dict(required=False, type='bool'),
+ expiration_time=dict(required=False, type='str'),
+ snap_mirror_label=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.state = input_params['state']
+ self.name = input_params['name']
+ self.account_id = input_params['account_id']
+ self.src_volume_id = input_params['src_volume_id']
+ self.src_snapshot_id = input_params['src_snapshot_id']
+ self.retention = input_params['retention']
+ self.properties_provided = False
+
+ self.expiration_time = input_params['expiration_time']
+ if input_params['expiration_time'] is not None:
+ self.properties_provided = True
+
+ self.enable_remote_replication = input_params['enable_remote_replication']
+ if input_params['enable_remote_replication'] is not None:
+ self.properties_provided = True
+
+ self.snap_mirror_label = input_params['snap_mirror_label']
+ if input_params['snap_mirror_label'] is not None:
+ self.properties_provided = True
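+        # properties_provided records that at least one modifiable snapshot property
+        # (expiration_time, enable_remote_replication or snap_mirror_label) was supplied,
+        # so apply() knows that a modify may be needed rather than a create.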
+
+ if self.state == 'absent' and self.src_snapshot_id is None:
+ self.module.fail_json(
+ msg="Please provide required parameter : snapshot_id")
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_snapshot')
+
+ def get_account_id(self):
+ """
+ Return account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ return self.account_id
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+
+ def get_src_volume_id(self):
+ """
+ Return volume id if found
+ """
+ src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
+ if src_vol_id is not None:
+ # Update and return self.volume_id
+ self.src_volume_id = src_vol_id
+ # Return src_volume_id
+ return self.src_volume_id
+ return None
+
+ def get_snapshot(self, name=None):
+ """
+ Return snapshot details if found
+ """
+ src_snapshot = None
+ if name is not None:
+ src_snapshot = self.elementsw_helper.get_snapshot(name, self.src_volume_id)
+ elif self.src_snapshot_id is not None:
+ src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
+ if src_snapshot is not None:
+ # Update self.src_snapshot_id
+ self.src_snapshot_id = src_snapshot.snapshot_id
+ # Return src_snapshot
+ return src_snapshot
+
+ def create_snapshot(self):
+ """
+ Create Snapshot
+ """
+ try:
+ self.sfe.create_snapshot(volume_id=self.src_volume_id,
+ snapshot_id=self.src_snapshot_id,
+ name=self.name,
+ enable_remote_replication=self.enable_remote_replication,
+ retention=self.retention,
+ snap_mirror_label=self.snap_mirror_label,
+ attributes=self.attributes)
+ except Exception as exception_object:
+ self.module.fail_json(
+ msg='Error creating snapshot %s' % (
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def modify_snapshot(self):
+ """
+ Modify Snapshot Properties
+ """
+ try:
+ self.sfe.modify_snapshot(snapshot_id=self.src_snapshot_id,
+ expiration_time=self.expiration_time,
+ enable_remote_replication=self.enable_remote_replication,
+ snap_mirror_label=self.snap_mirror_label)
+ except Exception as exception_object:
+ self.module.fail_json(
+                msg='Error modifying snapshot: %s' % (
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def delete_snapshot(self):
+ """
+ Delete Snapshot
+ """
+ try:
+ self.sfe.delete_snapshot(snapshot_id=self.src_snapshot_id)
+ except Exception as exception_object:
+ self.module.fail_json(
+                msg='Error deleting snapshot: %s' % (
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check, process and initiate snapshot operation
+ """
+ changed = False
+ result_message = None
+ self.get_account_id()
+
+        # Don't proceed if the source volume is not found
+ if self.get_src_volume_id() is None:
+ self.module.fail_json(msg="Volume id not found %s" % self.src_volume_id)
+
+ # Get snapshot details using source volume
+ snapshot_detail = self.get_snapshot()
+
+ if snapshot_detail:
+ if self.properties_provided:
+ if self.expiration_time != snapshot_detail.expiration_time:
+ changed = True
+ else: # To preserve value in case parameter expiration_time is not defined/provided.
+ self.expiration_time = snapshot_detail.expiration_time
+
+ if self.enable_remote_replication != snapshot_detail.enable_remote_replication:
+ changed = True
+                else:  # To preserve the value in case parameter enable_remote_replication is not defined/provided.
+ self.enable_remote_replication = snapshot_detail.enable_remote_replication
+
+ if self.snap_mirror_label != snapshot_detail.snap_mirror_label:
+ changed = True
+ else: # To preserve value in case parameter snap_mirror_label is not defined/provided.
+ self.snap_mirror_label = snapshot_detail.snap_mirror_label
+
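+        # At this point 'changed' is True only when one of the three modifiable properties
+        # differs from the existing snapshot; unchanged properties have been back-filled
+        # from snapshot_detail so a later modify_snapshot() call does not blank them out.
+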
+ if self.account_id is None or self.src_volume_id is None or self.module.check_mode:
+ changed = False
+ result_message = "Check mode, skipping changes"
+ elif self.state == 'absent' and snapshot_detail is not None:
+ self.delete_snapshot()
+ changed = True
+ elif self.state == 'present' and snapshot_detail is not None:
+ if changed:
+ self.modify_snapshot() # Modify Snapshot properties
+ elif not self.properties_provided:
+ if self.name is not None:
+ snapshot = self.get_snapshot(self.name)
+ # If snapshot with name already exists return without performing any action
+ if snapshot is None:
+ self.create_snapshot() # Create Snapshot using parent src_snapshot_id
+ changed = True
+ else:
+ self.create_snapshot()
+ changed = True
+ elif self.state == 'present':
+ if self.name is not None:
+ snapshot = self.get_snapshot(self.name)
+ # If snapshot with name already exists return without performing any action
+ if snapshot is None:
+ self.create_snapshot() # Create Snapshot using parent src_snapshot_id
+ changed = True
+ else:
+ self.create_snapshot()
+ changed = True
+ else:
+ changed = False
+ result_message = "No changes requested, skipping changes"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+
+ na_elementsw_snapshot = ElementOSSnapshot()
+ na_elementsw_snapshot.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py
new file mode 100644
index 00000000..1e9d8e59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Snapshot Restore
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_snapshot_restore
+
+short_description: NetApp Element Software Restore Snapshot
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Restore a snapshot to a volume on an Element OS cluster.
+
+options:
+
+ src_volume_id:
+ description:
+ - ID or Name of source active volume.
+ required: true
+ type: str
+
+ src_snapshot_id:
+ description:
+ - ID or Name of an existing snapshot.
+ required: true
+ type: str
+
+ dest_volume_name:
+ description:
+    - New name of the destination volume for restoring the snapshot.
+ required: true
+ type: str
+
+ account_id:
+ description:
+ - Account ID or Name of Parent/Source Volume.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Restore snapshot to volume
+ tags:
+ - elementsw_create_snapshot_restore
+ na_elementsw_snapshot_restore:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ account_id: ansible-1
+ src_snapshot_id: snapshot_20171021
+ src_volume_id: volume-playarea
+ dest_volume_name: dest-volume-area
+
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementOSSnapshotRestore(object):
+ """
+ Element OS Restore from snapshot
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ account_id=dict(required=True, type='str'),
+ src_volume_id=dict(required=True, type='str'),
+ dest_volume_name=dict(required=True, type='str'),
+ src_snapshot_id=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.account_id = input_params['account_id']
+ self.src_volume_id = input_params['src_volume_id']
+ self.dest_volume_name = input_params['dest_volume_name']
+ self.src_snapshot_id = input_params['src_snapshot_id']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_snapshot_restore')
+
+ def get_account_id(self):
+ """
+ Get account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ return self.account_id
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+
+ def get_snapshot_id(self):
+ """
+ Return snapshot details if found
+ """
+ src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
+ # Update and return self.src_snapshot_id
+ if src_snapshot:
+ self.src_snapshot_id = src_snapshot.snapshot_id
+ # Return self.src_snapshot_id
+ return self.src_snapshot_id
+ return None
+
+ def restore_snapshot(self):
+ """
+ Restore Snapshot to Volume
+ """
+ try:
+ self.sfe.clone_volume(volume_id=self.src_volume_id,
+ name=self.dest_volume_name,
+ snapshot_id=self.src_snapshot_id,
+ attributes=self.attributes)
+ except Exception as exception_object:
+ self.module.fail_json(
+                msg='Error restoring snapshot: %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check, process and initiate restore snapshot to volume operation
+ """
+ changed = False
+ result_message = None
+ self.get_account_id()
+ src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
+
+ if src_vol_id is not None:
+ # Update self.src_volume_id
+ self.src_volume_id = src_vol_id
+ if self.get_snapshot_id() is not None:
+                # For idempotency, only restore when no volume with the destination name already exists
+ if self.elementsw_helper.volume_exists(self.dest_volume_name, self.account_id) is None:
+ self.restore_snapshot()
+ changed = True
+ else:
+ result_message = "No changes requested, Skipping changes"
+ else:
+ self.module.fail_json(msg="Snapshot id not found %s" % self.src_snapshot_id)
+ else:
+ self.module.fail_json(msg="Volume id not found %s" % self.src_volume_id)
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_snapshot_restore = ElementOSSnapshotRestore()
+ na_elementsw_snapshot_restore.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py
new file mode 100644
index 00000000..2d6a7ee8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Element SW Software Snapshot Schedule"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_snapshot_schedule
+
+short_description: NetApp Element Software Snapshot Schedules
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update snapshot schedules on ElementSW.
+
+options:
+
+ state:
+ description:
+ - Whether the specified schedule should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ paused:
+ description:
+ - Pause / Resume a schedule.
+ type: bool
+
+ recurring:
+ description:
+ - Should the schedule recur?
+ type: bool
+
+ schedule_type:
+ description:
+ - Schedule type for creating schedule.
+ choices: ['DaysOfWeekFrequency','DaysOfMonthFrequency','TimeIntervalFrequency']
+ type: str
+
+ time_interval_days:
+ description: Time interval in days.
+ type: int
+
+ time_interval_hours:
+ description: Time interval in hours.
+ type: int
+
+ time_interval_minutes:
+ description: Time interval in minutes.
+ type: int
+
+ days_of_week_weekdays:
+    description: List of days of the week (Sunday to Saturday).
+ type: list
+ elements: str
+
+ days_of_week_hours:
+    description: Time specified in hours.
+ type: int
+
+ days_of_week_minutes:
+ description: Time specified in minutes.
+ type: int
+
+ days_of_month_monthdays:
+    description: List of days of the month (1-31).
+ type: list
+ elements: int
+
+ days_of_month_hours:
+    description: Time specified in hours.
+ type: int
+
+ days_of_month_minutes:
+ description: Time specified in minutes.
+ type: int
+
+ name:
+ description:
+ - Name for the snapshot schedule.
+    - It accepts either a schedule_id or a schedule_name.
+    - If the name is a digit, it is treated as a schedule_id.
+    - If the name is a string, it is treated as a schedule_name.
+ required: true
+ type: str
+
+ snapshot_name:
+ description:
+ - Name for the created snapshots.
+ type: str
+
+ volumes:
+ description:
+ - Volume IDs that you want to set the snapshot schedule for.
+    - It accepts both volume names and volume IDs.
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+    - It accepts either an account_name or an account_id.
+    - If the value is a digit, it is treated as an account_id.
+    - If the value is a string, it is treated as an account_name.
+ type: str
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ type: str
+
+ starting_date:
+ description:
+ - Starting date for the schedule.
+ - Required when C(state=present).
+ - "Format: C(2016-12-01T00:00:00Z)"
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create Snapshot schedule
+ na_elementsw_snapshot_schedule:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: Schedule_A
+ schedule_type: TimeIntervalFrequency
+ time_interval_days: 1
+ starting_date: '2016-12-01T00:00:00Z'
+ retention: '24:00:00'
+ volumes:
+ - 7
+ - test
+ account_id: 1
+
+ - name: Update Snapshot schedule
+ na_elementsw_snapshot_schedule:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: Schedule_A
+ schedule_type: TimeIntervalFrequency
+ time_interval_days: 1
+ starting_date: '2016-12-01T00:00:00Z'
+ retention: '24:00:00'
+ volumes:
+ - 8
+ - test1
+ account_id: 1
+
+ - name: Delete Snapshot schedule
+ na_elementsw_snapshot_schedule:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: 6
+"""
+
+RETURN = """
+
+schedule_id:
+ description: Schedule ID of the newly created schedule
+ returned: success
+ type: str
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ from solidfire.custom.models import DaysOfWeekFrequency, Weekday, DaysOfMonthFrequency
+ from solidfire.common import ApiServerError
+ from solidfire.custom.models import TimeIntervalFrequency
+ from solidfire.models import Schedule, ScheduleInfo
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWSnapShotSchedule(object):
+ """
+ Contains methods to parse arguments,
+ derive details of ElementSW objects
+ and send requests to ElementSW via
+ the ElementSW SDK
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure the SDK is installed
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ schedule_type=dict(required=False, choices=['DaysOfWeekFrequency', 'DaysOfMonthFrequency', 'TimeIntervalFrequency']),
+
+ time_interval_days=dict(required=False, type='int'),
+ time_interval_hours=dict(required=False, type='int'),
+ time_interval_minutes=dict(required=False, type='int'),
+
+ days_of_week_weekdays=dict(required=False, type='list', elements='str'),
+ days_of_week_hours=dict(required=False, type='int'),
+ days_of_week_minutes=dict(required=False, type='int'),
+
+ days_of_month_monthdays=dict(required=False, type='list', elements='int'),
+ days_of_month_hours=dict(required=False, type='int'),
+ days_of_month_minutes=dict(required=False, type='int'),
+
+ paused=dict(required=False, type='bool'),
+ recurring=dict(required=False, type='bool'),
+
+ starting_date=dict(required=False, type='str'),
+
+ snapshot_name=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list', elements='str'),
+ account_id=dict(required=False, type='str'),
+ retention=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['account_id', 'volumes', 'schedule_type']),
+ ('schedule_type', 'DaysOfMonthFrequency', ['days_of_month_monthdays']),
+ ('schedule_type', 'DaysOfWeekFrequency', ['days_of_week_weekdays'])
+
+ ],
+ supports_check_mode=True
+ )
+
+ param = self.module.params
+
+ # set up state variables
+ self.state = param['state']
+ self.name = param['name']
+ self.schedule_type = param['schedule_type']
+ self.days_of_week_weekdays = param['days_of_week_weekdays']
+ self.days_of_week_hours = param['days_of_week_hours']
+ self.days_of_week_minutes = param['days_of_week_minutes']
+ self.days_of_month_monthdays = param['days_of_month_monthdays']
+ self.days_of_month_hours = param['days_of_month_hours']
+ self.days_of_month_minutes = param['days_of_month_minutes']
+ self.time_interval_days = param['time_interval_days']
+ self.time_interval_hours = param['time_interval_hours']
+ self.time_interval_minutes = param['time_interval_minutes']
+ self.paused = param['paused']
+ self.recurring = param['recurring']
+ if self.schedule_type == 'DaysOfWeekFrequency':
+            # Build the self.weekdays list when schedule_type is DaysOfWeekFrequency
+            if self.days_of_week_weekdays is not None:
+ self.weekdays = []
+ for day in self.days_of_week_weekdays:
+ if str(day).isdigit():
+ # If id specified, return appropriate day
+ self.weekdays.append(Weekday.from_id(int(day)))
+ else:
+ # If name specified, return appropriate day
+ self.weekdays.append(Weekday.from_name(day.capitalize()))
+
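+        # Note on the loop above (values are illustrative): days_of_week_weekdays=['1', 'monday']
+        # becomes [Weekday.from_id(1), Weekday.from_name('Monday')] - numeric entries are
+        # looked up by id, everything else by capitalized name.
+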
+ if self.state == 'present' and self.schedule_type is None:
+ # Mandate schedule_type for create operation
+ self.module.fail_json(
+ msg="Please provide required parameter: schedule_type")
+
+ # Mandate schedule name for delete operation
+ if self.state == 'absent' and self.name is None:
+ self.module.fail_json(
+ msg="Please provide required parameter: name")
+
+ self.starting_date = param['starting_date']
+ self.snapshot_name = param['snapshot_name']
+ self.volumes = param['volumes']
+ self.account_id = param['account_id']
+ self.retention = param['retention']
+ self.create_schedule_result = None
+
+ if HAS_SF_SDK is False:
+ # Create ElementSW connection
+ self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ def get_schedule(self):
+        # Check whether the schedule exists, looked up by id or by name
+        # Return schedule details if found, None otherwise
+        # If found by id, set self.name to the schedule name
+ try:
+ schedule_list = self.sfe.list_schedules()
+ except ApiServerError:
+ return None
+
+ for schedule in schedule_list.schedules:
+ if schedule.to_be_deleted:
+                # skip this schedule if it is being deleted; treat it as if it did not exist
+ continue
+ if str(schedule.schedule_id) == self.name:
+ self.name = schedule.name
+ return schedule
+ elif schedule.name == self.name:
+ return schedule
+ return None
+
+ def get_account_id(self):
+ # Validate account id
+ # Return account_id if found, None otherwise
+ try:
+ account_id = self.elementsw_helper.account_exists(self.account_id)
+ return account_id
+ except ApiServerError:
+ return None
+
+ def get_volume_id(self):
+ # Validate volume_ids
+ # Return volume ids if found, fail if not found
+ volume_ids = []
+ for volume in self.volumes:
+ volume_id = self.elementsw_helper.volume_exists(volume.strip(), self.account_id)
+ if volume_id:
+ volume_ids.append(volume_id)
+ else:
+ self.module.fail_json(msg='Specified volume %s does not exist' % volume)
+ return volume_ids
+
+ def get_frequency(self):
+ # Configuring frequency depends on self.schedule_type
+ frequency = None
+ if self.schedule_type is not None and self.schedule_type == 'DaysOfWeekFrequency':
+ if self.weekdays is not None:
+ params = dict(weekdays=self.weekdays)
+ if self.days_of_week_hours is not None:
+ params['hours'] = self.days_of_week_hours
+ if self.days_of_week_minutes is not None:
+ params['minutes'] = self.days_of_week_minutes
+ frequency = DaysOfWeekFrequency(**params)
+ elif self.schedule_type is not None and self.schedule_type == 'DaysOfMonthFrequency':
+ if self.days_of_month_monthdays is not None:
+ params = dict(monthdays=self.days_of_month_monthdays)
+ if self.days_of_month_hours is not None:
+ params['hours'] = self.days_of_month_hours
+ if self.days_of_month_minutes is not None:
+ params['minutes'] = self.days_of_month_minutes
+ frequency = DaysOfMonthFrequency(**params)
+ elif self.schedule_type is not None and self.schedule_type == 'TimeIntervalFrequency':
+ params = dict()
+ if self.time_interval_days is not None:
+ params['days'] = self.time_interval_days
+ if self.time_interval_hours is not None:
+ params['hours'] = self.time_interval_hours
+ if self.time_interval_minutes is not None:
+ params['minutes'] = self.time_interval_minutes
+ if not params or sum(params.values()) == 0:
+                self.module.fail_json(msg='Specify at least one non-zero value with TimeIntervalFrequency.')
+ frequency = TimeIntervalFrequency(**params)
+ return frequency
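+        # Illustration (hypothetical values): schedule_type='TimeIntervalFrequency' with
+        # time_interval_days=1 yields TimeIntervalFrequency(days=1); unset hour/minute
+        # suboptions are simply omitted, and at least one of days/hours/minutes must be
+        # non-zero or the module fails above.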
+
+ def is_same_schedule_type(self, schedule_detail):
+        # Check whether the existing schedule's frequency type matches self.schedule_type;
+        # str(frequency) is expected to render as e.g. 'TimeIntervalFrequency(...)', so the
+        # type name is everything before the first '('.
+        return str(schedule_detail.frequency).split('(')[0] == self.schedule_type
+
+ def create_schedule(self):
+ # Create schedule
+ try:
+ frequency = self.get_frequency()
+ if frequency is None:
+ self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type)
+
+ # Create schedule
+ name = self.name
+ schedule_info = ScheduleInfo(
+ volume_ids=self.volumes,
+ snapshot_name=self.snapshot_name,
+ retention=self.retention
+ )
+
+ sched = Schedule(schedule_info, name, frequency)
+ sched.paused = self.paused
+ sched.recurring = self.recurring
+ sched.starting_date = self.starting_date
+
+ self.create_schedule_result = self.sfe.create_schedule(sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_schedule(self, schedule_id):
+ # delete schedule
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=schedule_id)
+ sched = get_schedule_result.schedule
+ sched.to_be_deleted = True
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_schedule(self, schedule_id):
+ # Update schedule
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=schedule_id)
+ sched = get_schedule_result.schedule
+ # Update schedule properties
+ sched.frequency = self.get_frequency()
+ if sched.frequency is None:
+ self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type)
+
+ if self.volumes is not None and len(self.volumes) > 0:
+ sched.schedule_info.volume_ids = self.volumes
+ if self.retention is not None:
+ sched.schedule_info.retention = self.retention
+ if self.snapshot_name is not None:
+ sched.schedule_info.snapshot_name = self.snapshot_name
+ if self.paused is not None:
+ sched.paused = self.paused
+ if self.recurring is not None:
+ sched.recurring = self.recurring
+ if self.starting_date is not None:
+ sched.starting_date = self.starting_date
+
+ # Make API call
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ # Perform pre-checks, call functions and exit
+
+ changed = False
+ update_schedule = False
+
+ if self.account_id is not None:
+ self.account_id = self.get_account_id()
+
+ if self.state == 'present' and self.volumes is not None:
+ if self.account_id:
+ self.volumes = self.get_volume_id()
+ else:
+ self.module.fail_json(msg='Specified account id does not exist')
+
+ # Getting the schedule details
+ schedule_detail = self.get_schedule()
+
+ if schedule_detail is None and self.state == 'present':
+ if len(self.volumes) > 0:
+ changed = True
+ else:
+ self.module.fail_json(msg='Specified volumes not on cluster')
+ elif schedule_detail is not None:
+ # Getting the schedule id
+ if self.state == 'absent':
+ changed = True
+ else:
+ # Check if we need to update the snapshot schedule
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
+ update_schedule = True
+ changed = True
+ elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
+ update_schedule = True
+ changed = True
+ elif self.paused is not None and schedule_detail.paused != self.paused:
+ update_schedule = True
+ changed = True
+ elif self.recurring is not None and schedule_detail.recurring != self.recurring:
+ update_schedule = True
+ changed = True
+ elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
+ update_schedule = True
+ changed = True
+ elif self.volumes is not None and len(self.volumes) > 0:
+ for volumeID in schedule_detail.schedule_info.volume_ids:
+ if volumeID not in self.volumes:
+ update_schedule = True
+ changed = True
+
+ temp_frequency = self.get_frequency()
+ if temp_frequency is not None:
+ # Checking schedule_type changes
+ if self.is_same_schedule_type(schedule_detail):
+ # If same schedule type
+ if self.schedule_type == "TimeIntervalFrequency":
+ # Check if there is any change in schedule.frequency, If schedule_type is time_interval
+ if schedule_detail.frequency.days != temp_frequency.days or \
+ schedule_detail.frequency.hours != temp_frequency.hours or \
+ schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+ elif self.schedule_type == "DaysOfMonthFrequency":
+ # Check if there is any change in schedule.frequency, If schedule_type is days_of_month
+ if len(schedule_detail.frequency.monthdays) != len(temp_frequency.monthdays) or \
+ schedule_detail.frequency.hours != temp_frequency.hours or \
+ schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+ elif len(schedule_detail.frequency.monthdays) == len(temp_frequency.monthdays):
+ actual_frequency_monthday = schedule_detail.frequency.monthdays
+ temp_frequency_monthday = temp_frequency.monthdays
+ for monthday in actual_frequency_monthday:
+ if monthday not in temp_frequency_monthday:
+ update_schedule = True
+ changed = True
+ elif self.schedule_type == "DaysOfWeekFrequency":
+ # Check if there is any change in schedule.frequency, If schedule_type is days_of_week
+ if len(schedule_detail.frequency.weekdays) != len(temp_frequency.weekdays) or \
+ schedule_detail.frequency.hours != temp_frequency.hours or \
+ schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+ elif len(schedule_detail.frequency.weekdays) == len(temp_frequency.weekdays):
+ actual_frequency_weekdays = schedule_detail.frequency.weekdays
+ temp_frequency_weekdays = temp_frequency.weekdays
+ if len([actual_weekday for actual_weekday, temp_weekday in
+ zip(actual_frequency_weekdays, temp_frequency_weekdays) if actual_weekday != temp_weekday]) != 0:
+ update_schedule = True
+ changed = True
+ else:
+ update_schedule = True
+ changed = True
+ else:
+ self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type)
+
+        result_message = ""
+ if changed:
+ if self.module.check_mode:
+ # Skip changes
+ result_message = "Check mode, skipping changes"
+ else:
+ if self.state == 'present':
+ if update_schedule:
+ self.update_schedule(schedule_detail.schedule_id)
+ result_message = "Snapshot Schedule modified"
+ else:
+ self.create_schedule()
+ result_message = "Snapshot Schedule created"
+ elif self.state == 'absent':
+ self.delete_schedule(schedule_detail.schedule_id)
+ result_message = "Snapshot Schedule deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ v = ElementSWSnapShotSchedule()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py
new file mode 100644
index 00000000..299338ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_vlan
+
+short_description: NetApp Element Software Manage VLAN
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, modify VLAN
+
+options:
+
+ state:
+ description:
+ - Whether the specified vlan should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vlan_tag:
+ description:
+ - Virtual Network Tag
+ required: true
+ type: str
+
+ name:
+ description:
+ - User defined name for the new VLAN
+ - Name of the vlan is unique
+ - Required for create
+ type: str
+
+ svip:
+ description:
+ - Storage virtual IP which is unique
+ - Required for create
+ type: str
+
+ address_blocks:
+ description:
+ - List of address blocks for the VLAN
+ - Each address block contains the starting IP address and size for the block
+ - Required for create
+ type: list
+ elements: dict
+
+ netmask:
+ description:
+ - Netmask for the VLAN
+ - Required for create
+ type: str
+
+ gateway:
+ description:
+ - Gateway for the VLAN
+ type: str
+
+ namespace:
+ description:
+ - Enable or disable namespaces
+ type: bool
+
+ attributes:
+ description:
+ - Dictionary of attributes with name and value for each attribute
+ type: dict
+
+'''
+
+EXAMPLES = """
+- name: Create vlan
+ na_elementsw_vlan:
+ state: present
+ name: test
+ vlan_tag: 1
+ svip: "{{ ip address }}"
+ netmask: "{{ netmask }}"
+ address_blocks:
+ - start: "{{ starting ip_address }}"
+ size: 5
+ - start: "{{ starting ip_address }}"
+ size: 5
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete vlan
+ na_elementsw_vlan:
+ state: absent
+ vlan_tag: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWVlan(object):
+ """ class to handle VLAN operations """
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and ElementSW connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ name=dict(required=False, type='str'),
+ vlan_tag=dict(required=True, type='str'),
+ svip=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ namespace=dict(required=False, type='bool'),
+ attributes=dict(required=False, type='dict'),
+ address_blocks=dict(required=False, type='list', elements='dict')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(module=self.module)
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.elementsw_helper = NaElementSWModule(self.elem)
+
+ # add telemetry attributes
+ if self.parameters.get('attributes') is not None:
+ self.parameters['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan'))
+ else:
+ self.parameters['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan')
+
+ def validate_keys(self):
+ """
+ Validate if all required keys are present before creating
+ """
+ required_keys = ['address_blocks', 'svip', 'netmask', 'name']
+ if all(item in self.parameters.keys() for item in required_keys) is False:
+ self.module.fail_json(msg="One or more required fields %s for creating VLAN is missing"
+ % required_keys)
+ addr_blk_fields = ['start', 'size']
+ for address in self.parameters['address_blocks']:
+ if 'start' not in address or 'size' not in address:
+ self.module.fail_json(msg="One or more required fields %s for address blocks is missing"
+ % addr_blk_fields)
+
+ def create_network(self):
+ """
+ Add VLAN
+ """
+ try:
+ self.validate_keys()
+ create_params = self.parameters.copy()
+ for key in ['username', 'hostname', 'password', 'state', 'vlan_tag']:
+ del create_params[key]
+ self.elem.add_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **create_params)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error creating VLAN %s"
+ % self.parameters['vlan_tag'],
+ exception=to_native(err))
+
+ def delete_network(self):
+ """
+ Remove VLAN
+ """
+ try:
+ self.elem.remove_virtual_network(virtual_network_tag=self.parameters['vlan_tag'])
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error deleting VLAN %s"
+ % self.parameters['vlan_tag'],
+ exception=to_native(err))
+
+ def modify_network(self, modify):
+ """
+ Modify the VLAN
+ """
+ try:
+ self.elem.modify_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **modify)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error modifying VLAN %s"
+ % self.parameters['vlan_tag'],
+ exception=to_native(err))
+
+ def get_network_details(self):
+ """
+ Check existing VLANs
+ :return: vlan details if found, None otherwise
+ :type: dict
+ """
+ vlans = self.elem.list_virtual_networks(virtual_network_tag=self.parameters['vlan_tag'])
+ vlan_details = dict()
+ for vlan in vlans.virtual_networks:
+ if vlan is not None:
+ vlan_details['name'] = vlan.name
+ vlan_details['address_blocks'] = list()
+ for address in vlan.address_blocks:
+ vlan_details['address_blocks'].append({
+ 'start': address.start,
+ 'size': address.size
+ })
+ vlan_details['svip'] = vlan.svip
+ vlan_details['gateway'] = vlan.gateway
+ vlan_details['netmask'] = vlan.netmask
+ vlan_details['namespace'] = vlan.namespace
+ vlan_details['attributes'] = vlan.attributes
+ return vlan_details
+ return None
+
+ def apply(self):
+ """
+ Call create / delete / modify vlan methods
+ """
+ network = self.get_network_details()
+ # calling helper to determine action
+ cd_action = self.na_helper.get_cd_action(network, self.parameters)
+ modify = self.na_helper.get_modified_attributes(network, self.parameters)
+ if not self.module.check_mode:
+ if cd_action == "create":
+ self.create_network()
+ elif cd_action == "delete":
+ self.delete_network()
+ elif modify:
+ if 'attributes' in modify:
+ # new attributes will replace existing ones
+ modify['attributes'] = self.parameters['attributes']
+ self.modify_network(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Apply vlan actions """
+ network_obj = ElementSWVlan()
+ network_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py
new file mode 100644
index 00000000..3fcaf00c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Element OS Software Volume Manager"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_volume
+
+short_description: NetApp Element Software Manage Volumes
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update volumes on ElementSW
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - The name of the volume to manage.
+ - It accepts volume_name or volume_id
+ required: true
+ type: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ - It accepts Account_id or Account_name
+ required: true
+ type: str
+
+ enable512e:
+ description:
+ - Required when C(state=present)
+ - Should the volume provide 512-byte sector emulation?
+ type: bool
+ aliases:
+ - enable512emulation
+
+ qos:
+ description: Initial quality of service settings for this volume. Configure as dict in playbooks.
+ type: dict
+
+ qos_policy_name:
+ description:
+ - Quality of service policy for this volume.
+ - It can be a name or an id.
+ - Mutually exclusive with C(qos) option.
+ type: str
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this volume.
+ type: dict
+
+ size:
+ description:
+ - The size of the volume in (size_unit).
+ - Required when C(state = present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ access:
+ description:
+ - Access allowed for the volume.
+ - readOnly Only read operations are allowed.
+ - readWrite Reads and writes are allowed.
+ - locked No reads or writes are allowed.
+ - replicationTarget Identify a volume as the target volume for a paired set of volumes.
+ - If the volume is not paired, the access status is locked.
+ - If unspecified, the access settings of the clone will be the same as the source.
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create Volume
+ na_elementsw_volume:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleVol
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+ account_id: 3
+ enable512e: False
+ size: 1
+ size_unit: gb
+
+ - name: Update Volume
+ na_elementsw_volume:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleVol
+ account_id: 3
+ access: readWrite
+
+ - name: Delete Volume
+ na_elementsw_volume:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: AnsibleVol
+ account_id: 2
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWVolume(object):
+ """
+ Contains methods to parse arguments,
+ derive details of ElementSW objects
+ and send requests to ElementOS via
+ the ElementSW SDK
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure SDK is installed
+ """
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=True),
+ enable512e=dict(required=False, type='bool', aliases=['enable512emulation']),
+ qos=dict(required=False, type='dict', default=None),
+ qos_policy_name=dict(required=False, type='str', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+
+ access=dict(required=False, type='str', default=None,
+ choices=['readOnly', 'readWrite', 'locked', 'replicationTarget']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['size', 'enable512e'])
+ ],
+ mutually_exclusive=[
+ ('qos', 'qos_policy_name'),
+ ],
+ supports_check_mode=True
+ )
+
+ param = self.module.params
+
+ # set up state variables
+ self.state = param['state']
+ self.name = param['name']
+ self.account_id = param['account_id']
+ self.enable512e = param['enable512e']
+ self.qos = param['qos']
+ self.qos_policy_name = param['qos_policy_name']
+ self.attributes = param['attributes']
+ self.access = param['access']
+ self.size_unit = param['size_unit']
+ if param['size'] is not None:
+ self.size = param['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
+ else:
+ try:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+ except solidfire.common.ApiServerError:
+ self.module.fail_json(msg="Unable to create the connection")
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume')
+
+ def get_account_id(self):
+ """
+ Return account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+ return self.account_id
+
+ def get_qos_policy(self, name):
+ """
+ Get QOS Policy
+ """
+ policy, error = self.elementsw_helper.get_qos_policy(name)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ return policy
+
+ def get_volume(self):
+ """
+ Return volume details if found
+ """
+ # Get volume details
+ volume_id = self.elementsw_helper.volume_exists(self.name, self.account_id)
+
+ if volume_id is not None:
+ # Return volume_details
+ volume_details = self.elementsw_helper.get_volume(volume_id)
+ if volume_details is not None:
+ return volume_details
+ return None
+
+ def create_volume(self, qos_policy_id):
+ """
+ Create Volume
+ :return: True if created, False if fails
+ """
+ options = dict(
+ name=self.name,
+ account_id=self.account_id,
+ total_size=self.size,
+ enable512e=self.enable512e,
+ attributes=self.attributes
+ )
+ if qos_policy_id is not None:
+ options['qos_policy_id'] = qos_policy_id
+ if self.qos is not None:
+ options['qos'] = self.qos
+ try:
+ self.sfe.create_volume(**options)
+ except Exception as err:
+ self.module.fail_json(msg="Error provisioning volume: %s of size: %s" % (self.name, self.size),
+ exception=to_native(err))
+
+ def delete_volume(self, volume_id):
+ """
+ Delete and purge the volume using volume id
+ :return: Success : True , Failed : False
+ """
+ try:
+ self.sfe.delete_volume(volume_id=volume_id)
+ self.sfe.purge_deleted_volume(volume_id=volume_id)
+ # Delete method will delete and also purge the volume instead of moving the volume state to inactive.
+
+ except Exception as err:
+ # Throwing the exact error message instead of generic error message
+ self.module.fail_json(msg='Error deleting volume: %s, %s' % (str(volume_id), to_native(err)),
+ exception=to_native(err))
+
+ def update_volume(self, volume_id, qos_policy_id):
+ """
+ Update the volume with the specified param
+ :return: Success : True, Failed : False
+ """
+ options = dict(
+ attributes=self.attributes
+ )
+ if self.access is not None:
+ options['access'] = self.access
+ if self.account_id is not None:
+ options['account_id'] = self.account_id
+ if self.qos is not None:
+ options['qos'] = self.qos
+ if qos_policy_id is not None:
+ options['qos_policy_id'] = qos_policy_id
+ if self.size is not None:
+ options['total_size'] = self.size
+ try:
+ self.sfe.modify_volume(volume_id, **options)
+ except Exception as err:
+ # Throwing the exact error message instead of generic error message
+ self.module.fail_json(msg='Error updating volume: %s, %s' % (str(volume_id), to_native(err)),
+ exception=to_native(err))
+
+ def apply(self):
+ # Perform pre-checks, call functions and exit
+ changed = False
+ qos_policy_id = None
+ action = None
+
+ self.get_account_id()
+ volume_detail = self.get_volume()
+
+ if self.state == 'present' and self.qos_policy_name is not None:
+ policy = self.get_qos_policy(self.qos_policy_name)
+ if policy is None:
+ error = 'Cannot find qos policy with name/id: %s' % self.qos_policy_name
+ self.module.fail_json(msg=error)
+ qos_policy_id = policy['qos_policy_id']
+
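+        # Decide between create, update and delete by comparing the existing
+        # volume (if any) with the requested state and parameters.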
+ if volume_detail:
+ volume_id = volume_detail.volume_id
+ if self.state == 'absent':
+ action = 'delete'
+
+ elif self.state == 'present':
+ # Checking all the params for update operation
+ if self.access is not None and volume_detail.access != self.access:
+ action = 'update'
+
+ if self.account_id is not None and volume_detail.account_id != self.account_id:
+ action = 'update'
+
+ if qos_policy_id is not None and volume_detail.qos_policy_id != qos_policy_id:
+ # volume_detail.qos_policy_id may be None if no policy is associated with the volume
+ action = 'update'
+
+ if self.qos is not None and volume_detail.qos_policy_id is not None:
+ # remove qos_policy
+ action = 'update'
+
+ if self.qos is not None:
+                    # The actual volume_detail.qos object has ['burst_iops', 'burst_time', 'curve', 'max_iops', 'min_iops'] keys.
+                    # Only minIOPS, maxIOPS and burstIOPS matter for idempotency, so only these values are compared.
+ volume_qos = vars(volume_detail.qos)
+ if volume_qos['min_iops'] != self.qos['minIOPS'] or volume_qos['max_iops'] != self.qos['maxIOPS'] \
+ or volume_qos['burst_iops'] != self.qos['burstIOPS']:
+ action = 'update'
+
+ if self.size is not None and volume_detail.total_size is not None and volume_detail.total_size != self.size:
+ size_difference = abs(float(volume_detail.total_size - self.size))
+ # Change size only if difference is bigger than 0.001
+ if size_difference / self.size > 0.001:
+ action = 'update'
+
+ if self.attributes is not None and volume_detail.attributes != self.attributes:
+ action = 'update'
+
+ elif self.state == 'present':
+ action = 'create'
+
+ result_message = ""
+
+ if action is not None:
+ changed = True
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ if action == 'create':
+ self.create_volume(qos_policy_id)
+ result_message = "Volume created"
+ elif action == 'update':
+ self.update_volume(volume_id, qos_policy_id)
+ result_message = "Volume updated"
+ elif action == 'delete':
+ self.delete_volume(volume_id)
+ result_message = "Volume deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ # Create object and call apply
+ na_elementsw_volume = ElementSWVolume()
+ na_elementsw_volume.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py
new file mode 100644
index 00000000..186ca85b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Element Software volume clone"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_volume_clone
+
+short_description: NetApp Element Software Create Volume Clone
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create volume clones on Element OS
+
+options:
+
+ name:
+ description:
+ - The name of the clone.
+ required: true
+ type: str
+
+ src_volume_id:
+ description:
+ - The id of the src volume to clone. id may be a numeric identifier or a volume name.
+ required: true
+ type: str
+
+ src_snapshot_id:
+ description:
+ - The id of the snapshot to clone. id may be a numeric identifier or a snapshot name.
+ type: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this cloned volume. id may be a numeric identifier or an account name.
+ required: true
+ type: str
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this cloned volume.
+ type: dict
+
+ size:
+ description:
+ - The size of the cloned volume in (size_unit).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ access:
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+ description:
+ - Access allowed for the volume.
+ - If unspecified, the access settings of the clone will be the same as the source.
+ - readOnly - Only read operations are allowed.
+ - readWrite - Reads and writes are allowed.
+ - locked - No reads or writes are allowed.
+ - replicationTarget - Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked.
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Clone Volume
+ na_elementsw_volume_clone:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ name: CloneAnsibleVol
+ src_volume_id: 123
+ src_snapshot_id: 41
+ account_id: 3
+ size: 1
+ size_unit: gb
+ access: readWrite
+ attributes: {"virtual_network_id": 12345}
+
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementOSVolumeClone(object):
+ """
+ Contains methods to parse arguments,
+ derive details of Element Software objects
+ and send requests to Element OS via
+ the Solidfire SDK
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure SDK is installed
+ """
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True),
+ src_volume_id=dict(required=True),
+ src_snapshot_id=dict(),
+ account_id=dict(required=True),
+ attributes=dict(type='dict', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ access=dict(type='str',
+ default=None, choices=['readOnly', 'readWrite',
+ 'locked', 'replicationTarget']),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ parameters = self.module.params
+
+ # set up state variables
+ self.name = parameters['name']
+ self.src_volume_id = parameters['src_volume_id']
+ self.src_snapshot_id = parameters['src_snapshot_id']
+ self.account_id = parameters['account_id']
+ self.attributes = parameters['attributes']
+
+ self.size_unit = parameters['size_unit']
+ if parameters['size'] is not None:
+ self.size = parameters['size'] * \
+ self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.access = parameters['access']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone')
+
+ def get_account_id(self):
+ """
+ Return account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ return self.account_id
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+
+ def get_snapshot_id(self):
+ """
+ Return snapshot details if found
+ """
+ src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
+ # Update and return self.src_snapshot_id
+ if src_snapshot is not None:
+ self.src_snapshot_id = src_snapshot.snapshot_id
+ # Return src_snapshot
+ return self.src_snapshot_id
+ return None
+
+ def get_src_volume_id(self):
+ """
+ Return volume id if found
+ """
+ src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
+ if src_vol_id is not None:
+ # Update and return self.volume_id
+ self.src_volume_id = src_vol_id
+ # Return src_volume_id
+ return self.src_volume_id
+ return None
+
+ def clone_volume(self):
+ """Clone Volume from source"""
+ try:
+ self.sfe.clone_volume(volume_id=self.src_volume_id,
+ name=self.name,
+ new_account_id=self.account_id,
+ new_size=self.size,
+ access=self.access,
+ snapshot_id=self.src_snapshot_id,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error creating clone %s of size %s" % (self.name, self.size), exception=to_native(err))
+
+ def apply(self):
+ """Perform pre-checks, call functions and exit"""
+ changed = False
+ result_message = ""
+
+ if self.get_account_id() is None:
+ self.module.fail_json(msg="Account id not found: %s" % (self.account_id))
+
+ # there is only one state. other operations
+ # are part of the volume module
+
+ # ensure that a volume with the clone name
+ # isn't already present
+ if self.elementsw_helper.volume_exists(self.name, self.account_id) is None:
+ # check for the source volume
+ if self.get_src_volume_id() is not None:
+ # check for a valid snapshot
+ if self.src_snapshot_id and not self.get_snapshot_id():
+ self.module.fail_json(msg="Snapshot id not found: %s" % (self.src_snapshot_id))
+ # change required
+ changed = True
+ else:
+ self.module.fail_json(msg="Volume id not found %s" % (self.src_volume_id))
+
+ if changed:
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ self.clone_volume()
+ result_message = "Volume cloned"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """Create object and call apply"""
+ volume_clone = ElementOSVolumeClone()
+ volume_clone.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py
new file mode 100644
index 00000000..0d5b38a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_elementsw_volume_pair
+
+short_description: NetApp Element Software Volume Pair
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete volume pair
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume pair should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ src_volume:
+ description:
+ - Source volume name or volume ID
+ required: true
+ type: str
+
+ src_account:
+ description:
+ - Source account name or ID
+ required: true
+ type: str
+
+ dest_volume:
+ description:
+ - Destination volume name or volume ID
+ required: true
+ type: str
+
+ dest_account:
+ description:
+ - Destination account name or ID
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Mode to start the volume pairing
+ choices: ['async', 'sync', 'snapshotsonly']
+ default: async
+ type: str
+
+ dest_mvip:
+ description:
+ - Destination IP address of the paired cluster.
+ required: true
+ type: str
+
+ dest_username:
+ description:
+ - Destination username for the paired cluster
+ - Optional if this is same as source cluster username.
+ type: str
+
+ dest_password:
+ description:
+ - Destination password for the paired cluster
+ - Optional if this is same as source cluster password.
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Create volume pair
+ na_elementsw_volume_pair:
+ hostname: "{{ src_cluster_hostname }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ state: present
+ src_volume: test1
+ src_account: test2
+ dest_volume: test3
+ dest_account: test4
+ mode: sync
+ dest_mvip: "{{ dest_cluster_hostname }}"
+
+ - name: Delete volume pair
+ na_elementsw_volume_pair:
+ hostname: "{{ src_cluster_hostname }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ state: absent
+ src_volume: 3
+ src_account: 1
+ dest_volume: 2
+ dest_account: 1
+ dest_mvip: "{{ dest_cluster_hostname }}"
+ dest_username: "{{ dest_cluster_username }}"
+ dest_password: "{{ dest_cluster_password }}"
+
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWVolumePair(object):
+ ''' class to handle volume pairing operations '''
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and SolidFire connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ src_volume=dict(required=True, type='str'),
+ src_account=dict(required=True, type='str'),
+ dest_volume=dict(required=True, type='str'),
+ dest_account=dict(required=True, type='str'),
+ mode=dict(required=False, type='str',
+ choices=['async', 'sync', 'snapshotsonly'],
+ default='async'),
+ dest_mvip=dict(required=True, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.elem)
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # get element_sw_connection for destination cluster
+ # overwrite existing source host, user and password with destination credentials
+ self.module.params['hostname'] = self.parameters['dest_mvip']
+ # username and password is same as source,
+ # if dest_username and dest_password aren't specified
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_elem = netapp_utils.create_sf_connection(module=self.module)
+ self.dest_elementsw_helper = NaElementSWModule(self.dest_elem)
+
+ def check_if_already_paired(self, vol_id):
+ """
+ Check for idempotency
+ A volume can have only one pair
+ Return paired-volume-id if volume is paired already
+ None if volume is not paired
+ """
+ paired_volumes = self.elem.list_volumes(volume_ids=[vol_id],
+ is_paired=True)
+ for vol in paired_volumes.volumes:
+ for pair in vol.volume_pairs:
+ if pair is not None:
+ return pair.remote_volume_id
+ return None
+
+ def pair_volumes(self):
+ """
+ Start volume pairing on source, and complete on target volume
+ """
+ try:
+ pair_key = self.elem.start_volume_pairing(
+ volume_id=self.parameters['src_vol_id'],
+ mode=self.parameters['mode'])
+ self.dest_elem.complete_volume_pairing(
+ volume_pairing_key=pair_key.volume_pairing_key,
+ volume_id=self.parameters['dest_vol_id'])
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error pairing volume id %s"
+ % (self.parameters['src_vol_id']),
+ exception=to_native(err))
+
+ def pairing_exists(self, src_id, dest_id):
+ src_paired = self.check_if_already_paired(self.parameters['src_vol_id'])
+ dest_paired = self.check_if_already_paired(self.parameters['dest_vol_id'])
+ if src_paired is not None or dest_paired is not None:
+ return True
+ return None
+
+ def unpair_volumes(self):
+ """
+ Delete volume pair
+ """
+ try:
+ self.elem.remove_volume_pair(volume_id=self.parameters['src_vol_id'])
+ self.dest_elem.remove_volume_pair(volume_id=self.parameters['dest_vol_id'])
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error unpairing volume ids %s and %s"
+ % (self.parameters['src_vol_id'],
+ self.parameters['dest_vol_id']),
+ exception=to_native(err))
+
+ def get_account_id(self, account, type):
+ """
+ Get source and destination account IDs
+ """
+ try:
+ if type == 'src':
+ self.parameters['src_account_id'] = self.elementsw_helper.account_exists(account)
+ elif type == 'dest':
+ self.parameters['dest_account_id'] = self.dest_elementsw_helper.account_exists(account)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error: either account %s or %s does not exist"
+ % (self.parameters['src_account'],
+ self.parameters['dest_account']),
+ exception=to_native(err))
+
+ def get_volume_id(self, volume, type):
+ """
+ Get source and destination volume IDs
+ """
+ if type == 'src':
+ self.parameters['src_vol_id'] = self.elementsw_helper.volume_exists(volume, self.parameters['src_account_id'])
+ if self.parameters['src_vol_id'] is None:
+ self.module.fail_json(msg="Error: source volume %s does not exist"
+ % (self.parameters['src_volume']))
+ elif type == 'dest':
+ self.parameters['dest_vol_id'] = self.dest_elementsw_helper.volume_exists(volume, self.parameters['dest_account_id'])
+ if self.parameters['dest_vol_id'] is None:
+ self.module.fail_json(msg="Error: destination volume %s does not exist"
+ % (self.parameters['dest_volume']))
+
+ def get_ids(self):
+ """
+ Get IDs for volumes and accounts
+ """
+ self.get_account_id(self.parameters['src_account'], 'src')
+ self.get_account_id(self.parameters['dest_account'], 'dest')
+ self.get_volume_id(self.parameters['src_volume'], 'src')
+ self.get_volume_id(self.parameters['dest_volume'], 'dest')
+
+ def apply(self):
+ """
+ Call create / delete volume pair methods
+ """
+ self.get_ids()
+ paired = self.pairing_exists(self.parameters['src_vol_id'],
+ self.parameters['dest_vol_id'])
+ # calling helper to determine action
+ cd_action = self.na_helper.get_cd_action(paired, self.parameters)
+ if cd_action == "create":
+ self.pair_volumes()
+ elif cd_action == "delete":
+ self.unpair_volumes()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Apply volume pair actions """
+ vol_obj = ElementSWVolumePair()
+ vol_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..f60ee678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
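+
+# Illustrative usage only (not part of this shim): unit tests typically patch the
+# built-in open with something like patch('%s.open' % BUILTINS, mock_open(read_data='...')).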
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+ # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't have a newline to end the last line, so remove the
+            # newline that our naive format() added.
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines` methods
+        of the file handle to return. This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
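+        # Route read/readline/readlines through the side effects above so they consume
+        # the shared _data generator unless an explicit return_value has been set.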
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
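+
+    # Illustrative example only: patch('builtins.open', mock_open(read_data='a\nb\n'))
+    # makes open(...).read() return 'a\nb\n' within the patched scope.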
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py
new file mode 100644
index 00000000..0bd1e255
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py
@@ -0,0 +1,175 @@
+''' unit test for Ansible module: na_elementsw_access_group.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_access_group \
+ import ElementSWAccessGroup as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_access_group'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+ def list_volume_access_groups(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build access_group list: access_groups.name, access_groups.account_id '''
+ access_groups = list()
+ access_group_list = self.Bunch(volume_access_groups=access_groups)
+ return access_group_list
+
+ def create_volume_access_group(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+ def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' returns account_id '''
+ if self.force_error and 'account_id' in self.where:
+ account_id = None
+ else:
+ account_id = 1
+ print('account_id', account_id)
+ account = self.Bunch(account_id=account_id)
+ result = self.Bunch(account=account)
+ return result
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_command_called(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'name': 'element_groupname',
+ 'account_id': 'element_account_id',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'name': 'element_groupname',
+ 'account_id': 'element_account_id',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+            # apply() is calling list_volume_access_groups() and create_volume_access_group()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating volume access group element_groupname: %s' % ADD_ERROR
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_account_id(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'name': 'element_groupname',
+ 'account_id': 'element_account_id',
+ 'volumes': ['volume1'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['account_id'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+            # apply() is calling get_account_by_name() to validate the account id
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified account id "%s" does not exist.' % 'element_account_id'
+ assert exc.value.args[0]['msg'] == message
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py
new file mode 100644
index 00000000..fb78ad78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py
@@ -0,0 +1,245 @@
+''' unit test for Ansible module: na_elementsw_access_group_volumes.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_access_group_volumes \
+ import ElementSWAccessGroupVolumes as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+MODIFY_ERROR = 'some_error_in_modify_access_group'
+
+VOLUME_ID = 777
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, volume_id=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.volume_id = volume_id
+
+ def list_volume_access_groups(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build access_group list: access_groups.name, access_groups.account_id '''
+ group_name = 'element_groupname'
+ if self.volume_id is None:
+ volume_list = list()
+ else:
+ volume_list = [self.volume_id]
+ access_group = self.Bunch(name=group_name, volume_access_group_id=888, volumes=volume_list)
+ access_groups = [access_group]
+ access_group_list = self.Bunch(volume_access_groups=access_groups)
+ return access_group_list
+
+ def list_volumes_for_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build volume list: volume.name, volume.id '''
+ volume = self.Bunch(name='element_volumename', volume_id=VOLUME_ID, delete_time='')
+ volumes = [volume]
+ volume_list = self.Bunch(volumes=volumes)
+ return volume_list
+
+ def modify_volume_access_group(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'modify_exception' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(MODIFY_ERROR)
+
+ def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' returns account_id '''
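+        # with force_error and 'get_account_id' in where, return a None account_id to simulate an unknown account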
+ if self.force_error and 'get_account_id' in self.where:
+ account_id = None
+ else:
+ account_id = 1
+ print('account_id', account_id)
+ account = self.Bunch(account_id=account_id)
+ result = self.Bunch(account=account)
+ return result
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'access_group': 'element_groupname',
+ 'volumes': 'element_volumename',
+ 'account_id': 'element_account_id',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_volume(self, mock_create_sf_connection):
+ ''' adding a volume '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_volume_idempotent(self, mock_create_sf_connection):
+ ''' adding a volume that is already in the access group '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(volume_id=VOLUME_ID)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_volume(self, mock_create_sf_connection):
+ ''' removing a volume that is in the access group '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(volume_id=VOLUME_ID)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_volume_idempotent(self, mock_create_sf_connection):
+ ''' removing a volume that is not in the access group '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_modify_exception(self, mock_create_sf_connection):
+ ''' modify does not return anything but can raise an exception '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error updating volume access group element_groupname: %s' % MODIFY_ERROR
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_volume_name(self, mock_create_sf_connection):
+ ''' report error if volume does not exist '''
+ args = dict(self.ARGS)
+ args['volumes'] = ['volume1']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified volume %s does not exist' % 'volume1'
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_account_group_name(self, mock_create_sf_connection):
+ ''' report error if access group does not exist '''
+ args = dict(self.ARGS)
+ args['access_group'] = 'something_else'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified access group "%s" does not exist for account id: %s.' % ('something_else', 'element_account_id')
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_account_id(self, mock_create_sf_connection):
+ ''' report error if account id is not found '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_account_id')
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified account id "%s" does not exist.' % 'element_account_id'
+ assert exc.value.args[0]['msg'] == message
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py
new file mode 100644
index 00000000..8075ba5c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py
@@ -0,0 +1,137 @@
+''' unit test for Ansible module: na_elementsw_account.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_account \
+ import ElementSWAccount as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_account'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+ def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build account list: account.username, account.account_id '''
+ accounts = list()
+ account_list = self.Bunch(accounts=accounts)
+ return account_list
+
+ def add_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_command_called(self, mock_create_sf_connection):
+        ''' create an account when it does not exist '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
+        ''' report an error when add_account raises an exception '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # apply() is calling list_accounts() and add_account()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating account element_username: %s' % ADD_ERROR
+ assert exc.value.args[0]['msg'] == message
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py
new file mode 100644
index 00000000..6624f374
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py
@@ -0,0 +1,228 @@
+''' unit test for Ansible module: na_elementsw_cluster.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import inspect
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster \
+ import ElementSWCluster as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_ID3 = 999
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __repr__(self):
+ results = dict()
+ for key, value in vars(self).items():
+ results[key] = repr(value)
+ return repr(results)
+
+ def __init__(self, force_error=False, where=None, nodes=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.nodes = nodes
+ self._port = 442
+ self.called = list()
+
+ def record(self, args, kwargs):
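+        ''' record the caller's name in self.called so tests can assert which SDK methods were invoked '''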
+ name = inspect.stack()[1][3] # caller function name
+ print('%s: , args: %s, kwargs: %s' % (name, args, kwargs))
+ self.called.append(name)
+
+ def create_cluster(self, *args, **kwargs): # pylint: disable=unused-argument
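+        ''' mock create_cluster: just records the call '''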
+ self.record(repr(args), repr(kwargs))
+
+ def send_request(self, *args, **kwargs): # pylint: disable=unused-argument
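+        ''' mock send_request: just records the call '''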
+ self.record(repr(args), repr(kwargs))
+
+ def get_config(self, *args, **kwargs): # pylint: disable=unused-argument
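+        ''' return a config.cluster object with an ensemble list and cluster name, or raise if forced '''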
+ self.record(repr(args), repr(kwargs))
+ if self.force_error and self.where == 'get_config_exception':
+ raise ConnectionError
+ if self.nodes is not None:
+ nodes = ['%d:%s' % (i, node) for i, node in enumerate(self.nodes)]
+ else:
+ nodes = list()
+ cluster = self.Bunch(ensemble=nodes, cluster='cl_name')
+ config = self.Bunch(cluster=cluster)
+ return self.Bunch(config=config)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ # 'state': 'present',
+ 'management_virtual_ip': '10.10.10.10',
+ 'storage_virtual_ip': '10.10.10.11',
+ 'nodes': [NODE_ID1, NODE_ID2],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create(self, mock_create_sf_connection):
+ ''' create cluster basic '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_config_exception')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ msg = 'created'
+ assert msg in exc.value.args[0]['msg']
+ assert 'create_cluster' in my_obj.sfe_node.called
+ assert 'send_request' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_extra_parms(self, mock_create_sf_connection):
+ ''' force a direct call to send_request '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['order_number'] = '12345'
+ args['serial_number'] = '54321'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_config_exception')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'send_request' in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent(self, mock_create_sf_connection):
+ ''' cluster already exists with same nodes '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent_extra_nodes(self, mock_create_sf_connection):
+ ''' cluster already exists with more nodes '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2, NODE_ID3])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error: found existing cluster with more nodes in ensemble.'
+ assert msg in exc.value.args[0]['msg']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent_extra_nodes_ok(self, mock_create_sf_connection):
+ ''' cluster already exists with more nodes but we're OK with a superset '''
+ args = dict(self.ARGS)
+ args['fail_if_cluster_already_exists_with_larger_ensemble'] = False
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2, NODE_ID3])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ msg = 'cluster already exists'
+ assert msg in exc.value.args[0]['msg']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent_missing_nodes(self, mock_create_sf_connection):
+ ''' cluster already exists with fewer nodes.
+        Since not every node is listed in the ensemble, we can't tell whether it's an error or not '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ msg = 'cluster already exists'
+ assert msg in exc.value.args[0]['msg']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py
new file mode 100644
index 00000000..79f461cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py
@@ -0,0 +1,157 @@
+''' unit test for Ansible module: na_elementsw_cluster_config.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster_config \
+ import ElementSWClusterConfig as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+GET_ERROR = 'some_error_in_get_ntp_info'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': '10.253.168.129',
+ 'username': 'namburu',
+ 'password': 'SFlab1234',
+ })
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_module_fail_when_required_args_missing(self, mock_create_sf_connection):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_setup_ntp_info_called(self, mock_create_sf_connection):
+ ''' test if setup_ntp_info is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ ntp_dict = {'set_ntp_info': {'broadcastclient': None,
+ 'ntp_servers': ['1.1.1.1']}}
+ module_args.update(ntp_dict)
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_setup_ntp_info: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_set_encryption_at_rest_called(self, mock_create_sf_connection):
+ ''' test if set_encryption_at_rest is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'encryption_at_rest': 'present'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_set_encryption_at_rest enable: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'encryption_at_rest': 'absent'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_set_encryption_at_rest disable: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_enable_feature_called(self, mock_create_sf_connection):
+ ''' test if enable_feature for vvols is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'enable_virtual_volumes': True})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_enable_feature: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_set_cluster_full_threshold_called(self, mock_create_sf_connection):
+        ''' test if set_cluster_full_threshold is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ cluster_mod_dict = \
+ {'modify_cluster_full_threshold': {'stage2_aware_threshold': 2,
+ 'stage3_block_threshold_percent': 2,
+ 'max_metadata_over_provision_factor': 2}}
+ module_args.update(cluster_mod_dict)
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_set_cluster_full_threshold: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py
new file mode 100644
index 00000000..9236daa0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py
@@ -0,0 +1,176 @@
+''' unit test for Ansible module: na_elementsw_cluster_snmp.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster_snmp \
+ import ElementSWClusterSnmp as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+GET_ERROR = 'some_error_in_get_snmp_info'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': '10.117.78.131',
+ 'username': 'admin',
+ 'password': 'netapp1!',
+ })
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_module_fail_when_required_args_missing(self, mock_create_sf_connection):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_enable_snmp_called(self, mock_create_sf_connection):
+ ''' test if enable_snmp is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'snmp_v3_enabled': True,
+ 'state': 'present'})
+ module_args.update({'usm_users': {'access': 'rouser',
+ 'name': 'TestUser',
+ 'password': 'ChangeMe@123',
+ 'passphrase': 'ChangeMe@123',
+ 'secLevel': 'auth', }})
+
+ module_args.update({'networks': {'access': 'ro',
+ 'cidr': 24,
+ 'community': 'TestNetwork',
+ 'network': '192.168.0.1', }})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_if_enable_snmp_called: %s' % repr(exc.value))
+ assert exc.value
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_configure_snmp_from_version_3_TO_version_2_called(self, mock_create_sf_connection):
+        ''' test reconfiguring SNMP from version 3 to version 2 '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'snmp_v3_enabled': False,
+ 'state': 'present'})
+ module_args.update({'usm_users': {'access': 'rouser',
+ 'name': 'TestUser',
+ 'password': 'ChangeMe@123',
+ 'passphrase': 'ChangeMe@123',
+ 'secLevel': 'auth', }})
+
+ module_args.update({'networks': {'access': 'ro',
+ 'cidr': 24,
+ 'community': 'TestNetwork',
+ 'network': '192.168.0.1', }})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_configure_snmp_from_version_3_TO_version_2_called: %s' % repr(exc.value))
+ assert exc.value
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_configure_snmp_from_version_2_TO_version_3_called(self, mock_create_sf_connection):
+        ''' test reconfiguring SNMP from version 2 to version 3 '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'snmp_v3_enabled': True,
+ 'state': 'present'})
+ module_args.update({'usm_users': {'access': 'rouser',
+ 'name': 'TestUser_sample',
+ 'password': 'ChangeMe@123',
+ 'passphrase': 'ChangeMe@123',
+ 'secLevel': 'auth', }})
+
+ module_args.update({'networks': {'access': 'ro',
+ 'cidr': 24,
+ 'community': 'TestNetwork',
+ 'network': '192.168.0.1', }})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_configure_snmp_from_version_2_TO_version_3_called: %s' % repr(exc.value))
+ assert exc.value
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_disable_snmp_called(self, mock_create_sf_connection):
+ ''' test if disable_snmp is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'state': 'absent'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_if_disable_snmp_called: %s' % repr(exc.value))
+ assert exc.value
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py
new file mode 100644
index 00000000..696043d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py
@@ -0,0 +1,328 @@
+''' unit tests for Ansible module: na_elementsw_info.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import inspect
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_info \
+ import ElementSWInfo as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_ID3 = 999
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __repr__(self):
+ results = dict()
+ for key, value in vars(self).items():
+ results[key] = repr(value)
+ return repr(results)
+
+ def to_json(self):
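+            ''' convert nested Bunch objects into plain dicts by round-tripping through json '''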
+ return json.loads(json.dumps(self, default=lambda x: x.__dict__))
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.nodes = [NODE_ID1, NODE_ID2, NODE_ID3]
+ self._port = 442
+ self.called = list()
+ if force_error and where == 'cx':
+ raise netapp_utils.solidfire.common.ApiConnectionError('testme')
+
+ def record(self, args, kwargs):
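+        ''' record the caller's name in self.called so tests can assert which SDK methods were invoked '''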
+ name = inspect.stack()[1][3] # caller function name
+ print('%s: , args: %s, kwargs: %s' % (name, args, kwargs))
+ self.called.append(name)
+
+ def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build account list: account.username, account.account_id '''
+ self.record(repr(args), repr(kwargs))
+ accounts = list()
+ accounts.append({'username': 'user1'})
+ account_list = self.Bunch(accounts=accounts)
+ return account_list
+
+ def get_config(self, *args, **kwargs): # pylint: disable=unused-argument
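+        ''' return a config.cluster object with an ensemble list and cluster name, or raise if forced '''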
+ self.record(repr(args), repr(kwargs))
+ if self.force_error and self.where == 'get_config_exception':
+ raise ConnectionError
+ if self.nodes is not None:
+ nodes = ['%d:%s' % (i, node) for i, node in enumerate(self.nodes)]
+ else:
+ nodes = list()
+ cluster = self.Bunch(ensemble=nodes, cluster='cl_name')
+ config = self.Bunch(cluster=cluster)
+ return self.Bunch(config=config)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ # 'state': 'present',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_default(self, mock_create_sf_connection):
+ ''' gather all by default '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'cluster_accounts' in exc.value.args[0]['info']
+ assert 'node_config' in exc.value.args[0]['info']
+ username = exc.value.args[0]['info']['cluster_accounts']['accounts'][0]['username']
+ assert username == 'user1'
+ assert 'list_accounts' in my_obj.sfe_node.called
+ assert 'get_config' in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_all(self, mock_create_sf_connection):
+        ''' gather all subsets explicitly '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'list_accounts' in my_obj.sfe_node.called
+ assert 'get_config' in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_clusters(self, mock_create_sf_connection):
+ ''' gather all cluster scoped subsets '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all_clusters']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'cluster_accounts' in exc.value.args[0]['info']
+ accounts = exc.value.args[0]['info']['cluster_accounts']
+ print('accounts: >>%s<<' % accounts, type(accounts))
+ print(my_obj.sfe_node.called)
+ assert 'list_accounts' in my_obj.sfe_node.called
+ assert 'get_config' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_nodes(self, mock_create_sf_connection):
+ ''' gather all node scoped subsets '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all_nodes']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'node_config' in exc.value.args[0]['info']
+ config = exc.value.args[0]['info']['node_config']
+ print('config: >>%s<<' % config, type(config))
+ print(my_obj.sfe_node.called)
+ assert 'list_accounts' not in my_obj.sfe_node.called
+ assert 'get_config' in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_nodes_not_alone(self, mock_create_sf_connection):
+ ''' gather all node scoped subsets but fail as another subset is present '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all_nodes', 'dummy']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'no other subset is allowed'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_success(self, mock_create_sf_connection):
+        ''' filter on key, value - successful match '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(username='user1')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ username = exc.value.args[0]['info']['cluster_accounts']['accounts'][0]['username']
+ assert username == 'user1'
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_bad_key(self, mock_create_sf_connection):
+ ''' filter on key, value - key not found '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(bad_key='user1')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error: key bad_key not found in'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_bad_key_ignored(self, mock_create_sf_connection):
+ ''' filter on key, value - key not found - ignore error '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(bad_key='user1')
+ args['fail_on_key_not_found'] = False
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['info']['cluster_accounts']['accounts'] == list()
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_record_not_found(self, mock_create_sf_connection):
+ ''' filter on key, value - no match '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+        args['filter'] = dict(username='user111')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['info']['cluster_accounts']['accounts'] == list()
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_record_not_found_error(self, mock_create_sf_connection):
+ ''' filter on key, value - no match - force error on empty '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(username='user111')
+ args['fail_on_record_not_found'] = True
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error: no match for'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_connection_error(self, mock_create_sf_connection):
+        ''' check that an SDK connection error is reported at module creation '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # force a connection exception
+ mock_create_sf_connection.side_effect = netapp_utils.solidfire.common.ApiConnectionError('testme')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ print(exc.value.args[0])
+ msg = 'Failed to create connection for hostname:442'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_other_connection_error(self, mock_create_sf_connection):
+        ''' check that an unexpected error during connection setup is reported '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # force a connection exception
+ mock_create_sf_connection.side_effect = KeyError('testme')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ print(exc.value.args[0])
+ msg = 'Failed to connect for hostname:442'
+ assert msg in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py
new file mode 100644
index 00000000..ee5ff85d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py
@@ -0,0 +1,201 @@
+''' unit test for Ansible module: na_elementsw_initiators.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_initiators \
+ import ElementSWInitiators as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ class Initiator(object):
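+        ''' expose the keys of a dict as attributes '''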
+ def __init__(self, entries):
+ self.__dict__.update(entries)
+
+ def list_initiators(self):
+        ''' build a list containing a single initiator object '''
+ initiator = self.Bunch(
+ initiator_name="a",
+ initiator_id=13,
+ alias="a2",
+ # Note: 'config-mgmt' and 'event-source' are added for telemetry
+ attributes={'key': 'value', 'config-mgmt': 'ansible', 'event-source': 'na_elementsw_initiators'},
+ volume_access_groups=[1]
+ )
+ initiators = self.Bunch(
+ initiators=[initiator]
+ )
+ return initiators
+
+ def create_initiators(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' mock method '''
+ pass
+
+ def delete_initiators(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' mock method '''
+ pass
+
+ def modify_initiators(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' mock method '''
+ pass
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': '10.253.168.129',
+ 'username': 'namburu',
+ 'password': 'SFlab1234',
+ })
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_module_fail_when_required_args_missing(self, mock_create_sf_connection):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_initiator(self, mock_create_sf_connection):
+ ''' test if create initiator is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "present",
+ "initiators": [{
+ "name": "newinitiator1",
+ "alias": "newinitiator1alias",
+ "attributes": {"key1": "value1"}
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_initiators: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_initiator(self, mock_create_sf_connection):
+ ''' test if delete initiator is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "absent",
+ "initiators": [{
+ "name": "a"
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_initiators: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_initiator(self, mock_create_sf_connection):
+ ''' test if modify initiator is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "present",
+ "initiators": [{
+ "name": "a",
+ "alias": "a3",
+ "attributes": {"key": "value"}
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_modify_initiators: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_initiator_idempotent(self, mock_create_sf_connection):
+ ''' test that modify initiator is not called when nothing changes '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "present",
+ "initiators": [{
+ "name": "a",
+ "alias": "a2",
+ "attributes": {"key": "value"},
+ "volume_access_group_id": 1
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_modify_initiators: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py
new file mode 100644
index 00000000..5364a4e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py
@@ -0,0 +1,293 @@
+''' unit tests for Ansible module: na_elementsw_network_interfaces.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import inspect
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_network_interfaces \
+ import ElementSWNetworkInterfaces as my_module # module under test
+
+
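+# Test helper: serializing the args into basic._ANSIBLE_ARGS is how these unit tests
+# feed parameters to AnsibleModule without going through the Ansible runner.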
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_ID3 = 999
+
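+# MAPPING translates module option names to the key names used in the network config
+# payload, so assertions can look up either form via mapkey().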
+MAPPING = dict(
+ bond_mode='bond-mode',
+ bond_lacp_rate='bond-lacp_rate',
+ dns_nameservers='dns-nameservers',
+ dns_search='dns-search',
+ virtual_network_tag='virtualNetworkTag',
+)
+
+
+def mapkey(key):
+ if key in MAPPING:
+ return MAPPING[key]
+ return key
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __repr__(self):
+ results = dict()
+ for key, value in vars(self).items():
+ results[key] = repr(value)
+ return repr(results)
+
+ def to_json(self):
+ return json.loads(json.dumps(self, default=lambda x: x.__dict__))
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ # self._port = 442
+ self.called = list()
+ self.set_network_config_args = dict()
+ if force_error and where == 'cx':
+ raise netapp_utils.solidfire.common.ApiConnectionError('testme')
+
+ def record(self, args, kwargs): # pylint: disable=unused-argument
+ name = inspect.stack()[1][3] # caller function name
+ # print('%s: , args: %s, kwargs: %s' % (name, args, kwargs))
+ self.called.append(name)
+
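+ # Record the 'network' payload as a plain dict in set_network_config_args so tests
+ # can assert which bond sections (Bond1G/Bond10G) were sent and with which values.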
+ def set_network_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ self.record(repr(args), repr(kwargs))
+ print('network:', kwargs['network'].to_json())
+ self.set_network_config_args = kwargs['network'].to_json()
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ DEPRECATED_ARGS = {
+ 'ip_address_1g': 'ip_address_1g',
+ 'subnet_1g': 'subnet_1g',
+ 'gateway_address_1g': 'gateway_address_1g',
+ 'mtu_1g': 'mtu_1g', # make sure to use a value different from the default
+ 'bond_mode_1g': 'ALB', # make sure to use a value different from the default
+ 'lacp_1g': 'Fast', # make sure to use a value different from the default
+ 'ip_address_10g': 'ip_address_10g',
+ 'subnet_10g': 'subnet_10g',
+ 'gateway_address_10g': 'gateway_address_10g',
+ 'mtu_10g': 'mtu_10g', # make sure to use a value different from the default
+ 'bond_mode_10g': 'LACP', # make sure to use a value different from the default
+ 'lacp_10g': 'Fast', # make sure to use a value different from the default
+ 'method': 'static',
+ 'dns_nameservers': 'dns_nameservers',
+ 'dns_search_domains': 'dns_search_domains',
+ 'virtual_network_tag': 'virtual_network_tag',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
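+ # ARGS uses the new bond_1g/bond_10g dictionary options; DEPRECATED_ARGS above uses
+ # the old flat *_1g/*_10g options, which the module now rejects with an error.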
+ ARGS = {
+ 'bond_1g': {
+ 'address': '10.10.10.10',
+ 'netmask': '255.255.255.0',
+ 'gateway': '10.10.10.1',
+ 'mtu': '1500',
+ 'bond_mode': 'ActivePassive',
+ 'dns_nameservers': ['dns_nameservers'],
+ 'dns_search': ['dns_search_domains'],
+ 'virtual_network_tag': 'virtual_network_tag',
+ },
+ 'bond_10g': {
+ 'bond_mode': 'LACP',
+ 'bond_lacp_rate': 'Fast',
+ },
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_deprecated_nothing(self):
+ ''' deprecated without 1g or 10g options '''
+ args = dict(self.DEPRECATED_ARGS) # shallow copy as other tests can modify top-level args
+ for key in list(args):
+ if '1g' in key or '10g' in key:
+ del args[key]
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_1g or bond_10g options to configure the bond interfaces.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ def test_deprecated_all(self):
+ ''' deprecated with all options '''
+ args = dict(self.DEPRECATED_ARGS) # shallow copy as other tests can modify top-level args
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_1g and bond_10g options to configure the bond interfaces.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ def test_deprecated_1g_only(self):
+ ''' deprecated with 1g options only '''
+ args = dict(self.DEPRECATED_ARGS) # shallow copy as other tests can modify top-level args
+ for key in list(args):
+ if '10g' in key:
+ del args[key]
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_1g option to configure the bond 1G interface.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ def test_deprecated_10g_only(self):
+ ''' deprecated with 10g options only '''
+ args = dict(self.DEPRECATED_ARGS) # shallow copy as other tests can modify top-level args
+ for key in list(args):
+ if '1g' in key:
+ del args[key]
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_10g option to configure the bond 10G interface.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_nothing(self, mock_create_sf_connection):
+ ''' modify without 1g or 10g options '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ for key in list(args):
+ if '1g' in key or '10g' in key:
+ del args[key]
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ print('LN:', my_obj.module.params)
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert len(my_obj.sfe.set_network_config_args) == 0
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_all(self, mock_create_sf_connection):
+ ''' modify with all options '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'Bond1G' in my_obj.sfe.set_network_config_args
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_1g_only(self, mock_create_sf_connection):
+ ''' modify with 1g options only '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ for key in list(args):
+ if '10g' in key:
+ del args[key]
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'Bond1G' in my_obj.sfe.set_network_config_args
+ assert 'Bond10G' not in my_obj.sfe.set_network_config_args
+ print(my_obj.sfe.set_network_config_args['Bond1G'])
+ for key in args['bond_1g']:
+ if key != 'bond_lacp_rate':
+ assert my_obj.sfe.set_network_config_args['Bond1G'][mapkey(key)] == args['bond_1g'][key]
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_10g_only(self, mock_create_sf_connection):
+ ''' modify with 10g options only '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ for key in list(args):
+ if '1g' in key:
+ del args[key]
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'Bond1G' not in my_obj.sfe.set_network_config_args
+ assert 'Bond10G' in my_obj.sfe.set_network_config_args
+ assert my_obj.sfe.set_network_config_args['Bond10G']['bond-lacp_rate'] == args['bond_10g']['bond_lacp_rate']
+ for key in args['bond_10g']:
+ assert my_obj.sfe.set_network_config_args['Bond10G'][mapkey(key)] == args['bond_10g'][key]
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py
new file mode 100644
index 00000000..3e163d00
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py
@@ -0,0 +1,324 @@
+''' unit test for Ansible module: na_elementsw_node.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_node \
+ import ElementSWNode as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+MODIFY_ERROR = 'some_error_in_modify_access_group'
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_NAME1 = 'node_name1'
+NODE_NAME2 = 'node_name2'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, node_id=None, cluster_name='', node_state='Pending'):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.node_id = node_id
+ self.cluster_name = cluster_name
+ self.node_state = node_state
+
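+ # 'where' selects which list the two mocked nodes are returned in by
+ # list_all_nodes(): 'nodes', 'pending' or 'active_pending'.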
+ def list_all_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build node lists: nodes, pending_nodes and active_pending_nodes, based on self.where '''
+ nodes = list()
+ pending_nodes = list()
+ active_pending_nodes = list()
+ if self.node_id is None:
+ node_list = list()
+ else:
+ node_list = [self.node_id]
+ attrs1 = dict(mip='10.10.10.101', name=NODE_NAME1, node_id=NODE_ID1)
+ attrs2 = dict(mip='10.10.10.101', name=NODE_NAME2, node_id=NODE_ID2)
+ if self.where == 'pending':
+ attrs1['pending_node_id'] = NODE_ID1
+ attrs2['pending_node_id'] = NODE_ID2
+ node1 = self.Bunch(**attrs1)
+ node2 = self.Bunch(**attrs2)
+ if self.where == 'nodes':
+ nodes = [node1, node2]
+ elif self.where == 'pending':
+ pending_nodes = [node1, node2]
+ elif self.where == 'active_pending':
+ active_pending_nodes = [node1, node2]
+ node_list = self.Bunch(nodes=nodes, pending_nodes=pending_nodes, pending_active_nodes=active_pending_nodes)
+ return node_list
+
+ def add_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('adding_node: ', repr(args), repr(kwargs))
+
+ def remove_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('removing_node: ', repr(args), repr(kwargs))
+
+ def get_cluster_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('get_cluster_config: ', repr(args), repr(kwargs))
+ cluster = self.Bunch(cluster=self.cluster_name, state=self.node_state)
+ return self.Bunch(cluster=cluster)
+
+ def set_cluster_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('set_cluster_config: ', repr(args), repr(kwargs))
+
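+ # Return a drive with status 'active' for self.node_id, so removing that node is
+ # expected to fail with "node has active drives" when node_id is set.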
+ def list_drives(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('list_drives: ', repr(args), repr(kwargs))
+ drive = self.Bunch(node_id=self.node_id, status="active")
+ return self.Bunch(drives=[drive])
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'node_ids': [NODE_ID1, NODE_ID2],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_node_fail_not_pending(self, mock_create_sf_connection):
+ ''' adding a node - fails as these nodes are unknown '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'nodes not in pending or active lists'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_node(self, mock_create_sf_connection):
+ ''' adding a node '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='pending')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_node_idempotent(self, mock_create_sf_connection):
+ ''' adding a node that is already in the cluster '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_node(self, mock_create_sf_connection):
+ ''' removing a node that is in the cluster '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_node_idempotent(self, mock_create_sf_connection):
+ ''' removing a node that is not in the cluster '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_node_with_active_drive(self, mock_create_sf_connection):
+ ''' removing a node that is in the cluster but still associated with a drive '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(node_id=NODE_ID1, where='nodes')
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error deleting node %s: node has active drives' % NODE_NAME1
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_only(self, mock_create_sf_connection):
+ ''' set cluster name without adding the node '''
+ args = dict(self.ARGS)
+ args['preset_only'] = True
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ message = 'List of updated nodes with cluster_name:'
+ assert message in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_only_idempotent(self, mock_create_sf_connection):
+ ''' set cluster name without adding the node - name already set '''
+ args = dict(self.ARGS)
+ args['preset_only'] = True
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(cluster_name=args['cluster_name'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ message = ''
+ assert message == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_and_add(self, mock_create_sf_connection):
+ ''' set cluster name and add the node '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='pending')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ message = 'List of updated nodes with cluster_name:'
+ assert message in exc.value.args[0]['msg']
+ message = 'List of added nodes: '
+ assert message in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_and_add_idempotent(self, mock_create_sf_connection):
+ ''' set cluster name and add the node - both already done, no change '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name=args['cluster_name'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ message = ''
+ assert message == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_already_active_no_change(self, mock_create_sf_connection):
+ ''' setting the same cluster name is a no-op even though node state is 'Active' '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name=args['cluster_name'], node_state='Active')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ message = ''
+ assert message == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_already_active_change_not_allowed(self, mock_create_sf_connection):
+ ''' set cluster name fails because node state is 'Active' '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'new_cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name='old_cluster_name', node_state='Active')
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Error updating cluster name for node %s, already in 'Active' state" % NODE_ID1
+ assert message == exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py
new file mode 100644
index 00000000..83ac3711
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py
@@ -0,0 +1,300 @@
+''' unit test for Ansible module: na_elementsw_qos_policy.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_qos_policy \
+ import ElementSWQosPolicy as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
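+# (method, message) tuples, unpacked into ApiServerError(*...) by the mocks below to
+# simulate API failures on create/modify/delete.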
+CREATE_ERROR = 'create', 'some_error_in_create_qos_policy'
+MODIFY_ERROR = 'modify', 'some_error_in_modify_qos_policy'
+DELETE_ERROR = 'delete', 'some_error_in_delete_qos_policy'
+
+POLICY_ID = 888
+POLICY_NAME = 'element_qos_policy_name'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, qos_policy_name=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.policy_name = qos_policy_name
+
+ def list_qos_policies(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build qos_policy list: qos_policy.name, qos_policy.qos_policy_id, qos_policy.qos '''
+ if self.policy_name:
+ qos_policy_name = self.policy_name
+ else:
+ qos_policy_name = POLICY_NAME
+ qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000)
+ qos_policy = self.Bunch(name=qos_policy_name, qos_policy_id=POLICY_ID, qos=qos)
+ qos_policies = [qos_policy]
+ qos_policy_list = self.Bunch(qos_policies=qos_policies)
+ return qos_policy_list
+
+ def create_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'create_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*CREATE_ERROR)
+
+ def modify_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'modify_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*MODIFY_ERROR)
+
+ def delete_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'delete_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'name': 'element_qos_policy_name',
+ 'qos': {'minIOPS': 1000, 'maxIOPS': 20000, 'burstIOPS': 20000},
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_qos_policy(self, mock_create_sf_connection):
+ ''' adding a qos_policy '''
+ args = dict(self.ARGS) # shallow copy as other tests can modify top-level args
+ args['name'] += '_1' # new name to force a create
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_qos_policy_idempotent(self, mock_create_sf_connection):
+ ''' adding a qos_policy that already exists '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_qos_policy(self, mock_create_sf_connection):
+ ''' removing a qos policy '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_qos_policy_idempotent(self, mock_create_sf_connection):
+ ''' removing a qos policy that does not exist '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args['name'] += '_1' # new name to force idempotency
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_qos_policy(self, mock_create_sf_connection):
+ ''' modifying a qos policy '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_rename_qos_policy(self, mock_create_sf_connection):
+ ''' renaming a qos policy '''
+ args = dict(self.ARGS)
+ args['from_name'] = args['name']
+ args['name'] = 'a_new_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_rename_modify_qos_policy_idempotent(self, mock_create_sf_connection):
+ ''' renaming a qos policy when the target name already exists '''
+ args = dict(self.ARGS)
+ args['from_name'] = 'some_older_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_qos_policy_exception(self, mock_create_sf_connection):
+ ''' creating a qos policy can raise an exception '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['create_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating qos policy: %s' % POLICY_NAME
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_qos_policy_exception(self, mock_create_sf_connection):
+ ''' modifying a qos policy can raise an exception '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error updating qos policy: %s' % POLICY_NAME
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_qos_policy_exception(self, mock_create_sf_connection):
+ ''' deleting a qos policy can raise an exception '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['delete_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error deleting qos policy: %s' % POLICY_NAME
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_missing_qos_option(self, mock_create_sf_connection):
+ ''' report error if qos option is not given on create '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Error creating qos policy: %s, 'qos:' option is required" % args['name']
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_missing_from_name_policy(self, mock_create_sf_connection):
+ ''' report error if qos policy to rename does not exist '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args['from_name'] = 'something_not_likely_to_exist'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Error renaming qos policy, no existing policy with name/id: %s" % args['from_name']
+ assert exc.value.args[0]['msg'] == message
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py
new file mode 100644
index 00000000..7dc6e2d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py
@@ -0,0 +1,138 @@
+''' unit test template, based on the test for Ansible module: na_elementsw_account.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_account \
+ import ElementSWAccount as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_account'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+# TODO: replace list_accounts and add_account as needed
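+ # list_accounts returns an empty account list, so state=present always takes the
+ # create path in this template.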
+ def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build account list: account.username, account.account_id '''
+ accounts = list()
+ account_list = self.Bunch(accounts=accounts)
+ return account_list
+
+ def add_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_command_called(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # apply() is calling list_accounts() and add_account()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating account element_username: %s' % ADD_ERROR
+ assert exc.value.args[0]['msg'] == message
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py
new file mode 100644
index 00000000..e2dc51f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py
@@ -0,0 +1,343 @@
+''' unit test for Ansible module: na_elementsw_vlan.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan \
+ import ElementSWVlan as vlan # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_account'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ class Vlan(object):
+ def __init__(self, entries):
+ self.__dict__.update(entries)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
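+ # Only virtual_network_tag '1' returns the existing vlan, so tests that keep
+ # vlan_tag 1 exercise the modify/idempotency paths while other tags hit create.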
+ def list_virtual_networks(self, virtual_network_tag=None): # pylint: disable=unused-argument
+ ''' list of vlans '''
+ if virtual_network_tag == '1':
+ add1 = self.Bunch(
+ start='2.2.2.2',
+ size=4
+ )
+ add2 = self.Bunch(
+ start='3.3.3.3',
+ size=4
+ )
+ vlan = self.Bunch(
+ attributes={'key': 'value', 'config-mgmt': 'ansible', 'event-source': 'na_elementsw_vlan'},
+ name="test",
+ address_blocks=[
+ add1,
+ add2
+ ],
+ svip='192.168.1.2',
+ gateway='0.0.0.0',
+ netmask='255.255.248.0',
+ namespace=False
+ )
+ vlans = self.Bunch(
+ virtual_networks=[vlan]
+ )
+ else:
+ vlans = self.Bunch(
+ virtual_networks=[]
+ )
+ return vlans
+
+ def add_virtual_network(self, virtual_network_tag=None, **create): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+ def remove_virtual_network(self, virtual_network_tag=None): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'remove' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+ def modify_virtual_network(self, virtual_network_tag=None, **modify): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'modify' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ vlan()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def mock_args(self):
+ args = {
+ 'state': 'present',
+ 'name': 'test',
+ 'vlan_tag': 1,
+ 'address_blocks': [
+ {'start': '192.168.1.2', 'size': 5}
+ ],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'netmask': '255.255.248.0',
+ 'gateway': '0.0.0.0',
+ 'namespace': False,
+ 'svip': '192.168.1.2'
+ }
+ return dict(args)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module.NaElementSWModule.set_element_attributes')
+ def test_successful_create(self, mock_set_attributes, mock_create_sf_connection):
+ ''' successful create'''
+ mock_set_attributes.return_value = {'key': 'new_value'}
+ data = self.mock_args()
+ data['vlan_tag'] = '3'
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_delete(self, mock_create_sf_connection):
+ ''' successful delete'''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['svip'] = '3.4.5.6'
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_same_length(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.10.10.40', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.50', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_different_length_1(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5},
+ {'start': '10.20.10.50', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_different_length_2(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_different_length_3(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.10.10.30', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_helper_validate_keys(self, mock_create_sf_connection):
+ '''test validate_keys()'''
+ data = self.mock_args()
+ del data['svip']
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.validate_keys()
+ msg = "One or more required fields ['address_blocks', 'svip', 'netmask', 'name'] for creating VLAN is missing"
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify_idempotent(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4},
+ {'start': '3.3.3.3', 'size': 4}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify_attribute_value(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4},
+ {'start': '3.3.3.3', 'size': 4}]
+ data['attributes'] = {'key': 'value2'}
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify_attribute_key(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4},
+ {'start': '3.3.3.3', 'size': 4}]
+ data['attributes'] = {'key2': 'value2'}
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py
new file mode 100644
index 00000000..926dda90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py
@@ -0,0 +1,364 @@
+''' unit test for Ansible module: na_elementsw_volume.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_volume \
+ import ElementSWVolume as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
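+    # AnsibleModule picks these serialized arguments up from basic._ANSIBLE_ARGS
+    # (instead of reading stdin), so each test fully controls the module input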
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+CREATE_ERROR = 'create', 'some_error_in_create_volume'
+MODIFY_ERROR = 'modify', 'some_error_in_modify_volume'
+DELETE_ERROR = 'delete', 'some_error_in_delete_volume'
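+# each tuple is unpacked as the positional arguments of ApiServerError in the mock methods below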
+
+POLICY_ID = 888
+POLICY_NAME = 'element_qos_policy_name'
+VOLUME_ID = 777
+VOLUME_NAME = 'element_volume_name'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
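+            # e.g. Bunch(name='vol1', volume_id=7) gives obj.name == 'vol1' and obj.volume_id == 7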
+
+ def __init__(self, force_error=False, where=None, with_qos_policy_id=True):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.with_qos_policy_id = with_qos_policy_id
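+        # force_error/where let a test trigger ApiServerError in selected calls below;
+        # with_qos_policy_id controls whether list_volumes() reports a qos_policy_id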
+
+ def list_qos_policies(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build qos_policy list '''
+ qos_policy_name = POLICY_NAME
+ qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000)
+ qos_policy = self.Bunch(name=qos_policy_name, qos_policy_id=POLICY_ID, qos=qos)
+ qos_policy_1 = self.Bunch(name=qos_policy_name + '_1', qos_policy_id=POLICY_ID + 1, qos=qos)
+ qos_policies = [qos_policy, qos_policy_1]
+ qos_policy_list = self.Bunch(qos_policies=qos_policies)
+ return qos_policy_list
+
+ def list_volumes_for_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build volume list: volume.name, volume.id '''
+ volume = self.Bunch(name=VOLUME_NAME, volume_id=VOLUME_ID, delete_time='')
+ volumes = [volume]
+ volume_list = self.Bunch(volumes=volumes)
+ return volume_list
+
+ def list_volumes(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build volume details: volume.name, volume.id '''
+ if self.with_qos_policy_id:
+ qos_policy_id = POLICY_ID
+ else:
+ qos_policy_id = None
+ qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000)
+ volume = self.Bunch(name=VOLUME_NAME, volume_id=VOLUME_ID, delete_time='', access='rw',
+ account_id=1, qos=qos, qos_policy_id=qos_policy_id, total_size=1000000000,
+ attributes={'config-mgmt': 'ansible', 'event-source': 'na_elementsw_volume'}
+ )
+ volumes = [volume]
+ volume_list = self.Bunch(volumes=volumes)
+ return volume_list
+
+ def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' returns account_id '''
+ if self.force_error and 'get_account_id' in self.where:
+ account_id = None
+ else:
+ account_id = 1
+ account = self.Bunch(account_id=account_id)
+ result = self.Bunch(account=account)
+ return result
+
+ def create_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'create_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*CREATE_ERROR)
+
+ def modify_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ print("modify: %s, %s " % (repr(args), repr(kwargs)))
+ if self.force_error and 'modify_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*MODIFY_ERROR)
+
+ def delete_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'delete_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR)
+
+ def purge_deleted_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'delete_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'name': VOLUME_NAME,
+ 'account_id': 'element_account_id',
+ 'qos': {'minIOPS': 1000, 'maxIOPS': 20000, 'burstIOPS': 20000},
+ 'qos_policy_name': POLICY_NAME,
+ 'size': 1,
+ 'enable512e': True,
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_volume(self, mock_create_sf_connection):
+ ''' adding a volume '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_or_modify_volume_idempotent_qos_policy(self, mock_create_sf_connection):
+        ''' add or modify volume is idempotent when using qos_policy_name '''
+ args = dict(self.ARGS)
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_or_modify_volume_idempotent_qos(self, mock_create_sf_connection):
+        ''' add or modify volume is idempotent when using qos '''
+ args = dict(self.ARGS)
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_volume(self, mock_create_sf_connection):
+ ''' removing a volume '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_volume_idempotent(self, mock_create_sf_connection):
+        ''' removing a volume is idempotent when it does not exist '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args['name'] += '_1' # new name to force idempotency
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos_policy_to_qos(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos_policy(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args['qos_policy_name'] += '_1'
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos_to_qos_policy(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_volume_exception(self, mock_create_sf_connection):
+ ''' creating a volume can raise an exception '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['create_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error provisioning volume: %s' % args['name']
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_exception(self, mock_create_sf_connection):
+ ''' modifying a volume can raise an exception '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error updating volume: %s' % VOLUME_ID
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_volume_exception(self, mock_create_sf_connection):
+ ''' deleting a volume can raise an exception '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['delete_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error deleting volume: %s' % VOLUME_ID
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_non_existent_qos_policy(self, mock_create_sf_connection):
+        ''' report error when the qos policy name does not exist '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos')
+ args['qos_policy_name'] += '_2'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Cannot find qos policy with name/id: %s" % args['qos_policy_name']
+ assert exc.value.args[0]['msg'] == message
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py
new file mode 100644
index 00000000..171a7bae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2018 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for module_utils netapp_module.py '''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule as na_helper
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def test_get_cd_action_create(self):
+ ''' validate cd_action for create '''
+ current = None
+ desired = {'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result == 'create'
+
+ def test_get_cd_action_delete(self):
+ ''' validate cd_action for delete '''
+ current = {'state': 'absent'}
+ desired = {'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result == 'delete'
+
+ def test_get_cd_action(self):
+ ''' validate cd_action for returning None '''
+ current = None
+ desired = {'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result is None
+
+ def test_get_modified_attributes_for_no_data(self):
+ ''' validate modified attributes when current is None '''
+ current = None
+ desired = {'name': 'test'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_get_modified_attributes(self):
+ ''' validate modified attributes '''
+ current = {'name': ['test', 'abcd', 'xyz', 'pqr'], 'state': 'present'}
+ desired = {'name': ['abcd', 'abc', 'xyz', 'pqr'], 'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == desired
+
+ def test_get_modified_attributes_for_intersecting_mixed_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': [2, 'four', 'six', 8]}
+ desired = {'name': ['a', 8, 'ab', 'four', 'abcd']}
+ my_obj = na_helper()
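+        # the extra positional argument requests a list diff: only desired entries missing from current are returned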
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abcd']}
+
+ def test_get_modified_attributes_for_intersecting_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['two', 'four', 'six', 'eight']}
+ desired = {'name': ['a', 'six', 'ab', 'four', 'abc']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abc']}
+
+ def test_get_modified_attributes_for_nonintersecting_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['two', 'four', 'six', 'eight']}
+ desired = {'name': ['a', 'ab', 'abd']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abd']}
+
+ def test_get_modified_attributes_for_list_of_dicts_no_data(self):
+ ''' validate modified attributes for list diff '''
+ current = None
+ desired = {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {}
+
+ def test_get_modified_attributes_for_intersecting_list_of_dicts(self):
+ ''' validate modified attributes for list diff '''
+ current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+ desired = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+
+ def test_get_modified_attributes_for_nonintersecting_list_of_dicts(self):
+ ''' validate modified attributes for list diff '''
+ current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+ desired = {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+
+ def test_get_modified_attributes_for_list_diff(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['test', 'abcd'], 'state': 'present'}
+ desired = {'name': ['abcd', 'abc'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['abc']}
+
+ def test_get_modified_attributes_for_no_change(self):
+ ''' validate modified attributes for same data in current and desired '''
+ current = {'name': 'test'}
+ desired = {'name': 'test'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_is_rename_action_for_empty_input(self):
+ ''' validate rename action for input None '''
+ source = None
+ target = None
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+        assert result is None
+
+ def test_is_rename_action_for_no_source(self):
+ ''' validate rename action when source is None '''
+ source = None
+ target = 'test2'
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is False
+
+ def test_is_rename_action_for_no_target(self):
+ ''' validate rename action when target is None '''
+ source = 'test2'
+ target = None
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is True
+
+ def test_is_rename_action(self):
+ ''' validate rename action '''
+ source = 'test'
+ target = 'test2'
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is False
diff --git a/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/requirements.txt
new file mode 100644
index 00000000..65a0c585
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/elementsw/tests/unit/requirements.txt
@@ -0,0 +1,2 @@
+solidfire-sdk-python
+unittest2 ; python_version < '2.7'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/CHANGELOG.rst b/collections-debian-merged/ansible_collections/netapp/ontap/CHANGELOG.rst
new file mode 100644
index 00000000..3d2852f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/CHANGELOG.rst
@@ -0,0 +1,608 @@
+=====================================
+NetApp ONTAP Collection Release Notes
+=====================================
+
+.. contents:: Topics
+
+
+v20.10.0
+========
+
+Minor Changes
+-------------
+
+- na_ontap_rest_info - Support for gather subsets - ``application_info, application_template_info, autosupport_config_info, autosupport_messages_history, ontap_system_version, storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, support_ems_filters``
+
+Bugfixes
+--------
+
+- na_ontap_aggregate - support concurrent actions for rename/modify/add_object_store and create/add_object_store.
+- na_ontap_cluster - ``single_node_cluster`` option was ignored.
+- na_ontap_info - KeyError on ``tree`` for quota_report_info.
+- na_ontap_info - better reporting on KeyError traceback, option to ignore error.
+- na_ontap_snapmirror_policy - report error when attempting to change ``policy_type`` rather than taking no action.
+- na_ontap_volume - ``encrypt`` with a value of ``false`` is ignored when creating a volume.
+
+v20.9.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_cluster - ``node_name`` to set the node name when adding a node, or as an alternative to ``cluster_ip_address`` to remove a node.
+- na_ontap_cluster - ``state`` can be set to ``absent`` to remove a node identified with ``cluster_ip_address`` or ``node_name``.
+- na_ontap_qtree - ``wait_for_completion`` and ``time_out`` to wait for qtree deletion when using REST.
+- na_ontap_quotas - ``soft_disk_limit`` and ``soft_file_limit`` for the quota target.
+- na_ontap_rest_info - Support for gather subsets - ``initiator_groups_info, san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, storage_luns_info, storage_NVMe_namespaces``.
+
+Bugfixes
+--------
+
+- na_ontap_* - change version_added from '2.6' to '2.6.0' where applicable to satisfy sanity checker.
+- na_ontap_cluster - ``check_mode`` is now working properly.
+- na_ontap_interface - ``home_node`` is not required in pre-cluster mode.
+- na_ontap_interface - ``role`` is not required if ``service_policy`` is present and ONTAP version is 9.8.
+- na_ontap_interface - traceback in get_interface if node is not reachable.
+- na_ontap_job_schedule - allow ``job_minutes`` to set number to -1 for job creation with REST too.
+- na_ontap_qtree - fixed ``None is not subscriptable`` exception on rename operation.
+- na_ontap_volume - fixed ``KeyError`` exception on ``size`` when reporting creation error.
+- netapp.py - uncaught exception (traceback) on zapi.NaApiError.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_active_directory - NetApp ONTAP configure active directory
+- netapp.ontap.na_ontap_mcc_mediator - NetApp ONTAP Add and Remove MetroCluster Mediator
+- netapp.ontap.na_ontap_metrocluster - NetApp ONTAP set up a MetroCluster
+
+v20.8.0
+=======
+
+Minor Changes
+-------------
+
+- add ``type:`` and ``elements:`` information where missing.
+- na_ontap_aggregate - support ``disk_size_with_unit`` option.
+- na_ontap_ldap_client - support ``ad_domain`` and ``preferred_ad_server`` options.
+- na_ontap_qtree - ``force_delete`` option with a default of ``true`` so that ZAPI behavior is aligned with REST.
+- na_ontap_rest_info - Support for gather subsets - ``cloud_targets_info, cluster_chassis_info, cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, svm_nis_config_info, svm_peers_info, svm_peer-permissions_info``.
+- na_ontap_rest_info - Support for gather subsets for 9.8+ - ``cluster_metrocluster_diagnostics``.
+- na_ontap_security_certificates - ``ignore_name_if_not_supported`` option to not fail if ``name`` is present since ``name`` is not supported in ONTAP 9.6 and 9.7.
+- na_ontap_software_update - added ``timeout`` option to give enough time for the update to complete.
+- update ``required:`` information.
+- use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+
+Bugfixes
+--------
+
+- na_ontap_aggregate - ``disk-info`` error when using ``disks`` option.
+- na_ontap_autosupport_invoke - ``message`` has changed to ``autosupport_message`` as Redhat has reserved this word. ``message`` has been aliased to ``autosupport_message``.
+- na_ontap_cifs_vserver - fix documentation and add more examples.
+- na_ontap_cluster - module was not idempotent when changing location or contact information.
+- na_ontap_igroup - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+- na_ontap_igroup_initiator - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+- na_ontap_info - Fixed error causing module to fail on ``metrocluster_check_info``, ``env_sensors_info`` and ``volume_move_target_aggr_info``.
+- na_ontap_security_certificates - allows (``common_name``, ``type``) as an alternate key since ``name`` is not supported in ONTAP 9.6 and 9.7.
+- na_ontap_snapmirror - fixed KeyError when accessing ``relationship_type`` parameter.
+- na_ontap_snapmirror_policy - fixed a race condition when creating a new policy.
+- na_ontap_snapmirror_policy - fixed idempotency issue with ``is_network_compression_enabled`` for REST.
+- na_ontap_software_update - ignore connection errors during update as nodes may not be reachable.
+- na_ontap_user - enable lock state and password to be set in the same task for existing user.
+- na_ontap_volume - issue when snapdir_access and atime_update not passed together.
+- na_ontap_vscan_on_access_policy - ``bool`` type was not properly set for ``scan_files_with_no_ext``.
+- na_ontap_vscan_on_access_policy - ``policy_status`` enable/disable option was not supported.
+- na_ontap_vscan_on_demand_task - ``file_ext_to_include`` was not handled properly.
+- na_ontap_vscan_scanner_pool_policy - scanner_pool apply policy support on modification.
+- na_ontap_vserver_create (role) - LIF creation now defaults to system-defined unless the LIF type is iscsi.
+- ``use_rest`` is now case insensitive.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_file_directory_policy - NetApp ONTAP create, delete, or modify vserver security file-directory policy
+- netapp.ontap.na_ontap_ssh_command - NetApp ONTAP Run any cli command over plain SSH using paramiko.
+- netapp.ontap.na_ontap_wait_for_condition - NetApp ONTAP wait_for_condition. Loop over a get status request until a condition is met.
+
+v20.7.0
+=======
+
+Minor Changes
+-------------
+
+- module_utils/netapp - add retry on wait_on_job when job failed. Abort after 3 consecutive errors.
+- na_ontap_info - support ``continue_on_error`` option to continue when a ZAPI is not supported on a vserver, or for cluster RPC errors.
+- na_ontap_info - support ``query`` option to specify which objects to return.
+- na_ontap_info - support ``vserver`` tunneling to limit output to one vserver.
+- na_ontap_pb_get_online_volumes.yml - example playbook to list volumes that are online (or offline).
+- na_ontap_pb_install_SSL_certificate_REST.yml - example playbook to install SSL certificates using REST APIs.
+- na_ontap_rest_info - Support for gather subsets - ``cluster_node_info, cluster_peer_info, disk_info, cifs_services_info, cifs_share_info``.
+- na_ontap_snapmirror_policy - support for SnapMirror policy rules.
+- na_ontap_vscan_scanner_pool - support modification.
+
+Bugfixes
+--------
+
+- na_ontap_command - replace invalid backspace characters (0x08) with '.'.
+- na_ontap_firmware_download - exception on PCDATA if ONTAP returns a BEL (0x07) character.
+- na_ontap_info - lists were incorrectly processed in convert_keys, returning {}.
+- na_ontap_info - qtree_info is missing most entries. Changed key from ``vserver:id`` to ``vserver:volume:id``.
+- na_ontap_iscsi_security - adding no_log for password parameters.
+- na_ontap_portset - adding explicit error message as modify portset is not supported.
+- na_ontap_snapmirror - fixed snapmirror delete for loadsharing to not go to quiesce state for the rest of the set.
+- na_ontap_ucadapter - fixed KeyError if type is not provided and mode is 'cna'.
+- na_ontap_user - checked ``applications`` does not contain snmp when using REST API call.
+- na_ontap_user - fixed KeyError if locked key not set with REST API call.
+- na_ontap_user - fixed KeyError if vserver is empty with REST API call (useful to indicate cluster scope).
+- na_ontap_volume - fixed KeyError when getting info on an MVD volume.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_security_certificates - NetApp ONTAP manage security certificates.
+
+v20.6.1
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_firmware_upgrade - ``reboot_sp`` - reboot service processor before downloading package.
+- na_ontap_firmware_upgrade - ``rename_package`` - rename file when downloading service processor package.
+- na_ontap_firmware_upgrade - ``replace_package`` - replace local file when downloading service processor package.
+
+Bugfixes
+--------
+
+- na_ontap_firmware_upgrade - images are not downloaded, but the module reports success.
+- na_ontap_password - do not error out if password is identical to previous password (idempotency).
+- na_ontap_user - fixed KeyError if password is not provided.
+
+v20.6.0
+=======
+
+Minor Changes
+-------------
+
+- all modules - SSL certificate authentication in addition to username/password (python 2.7 or 3.x).
+- all modules - ``cert_filepath``, ``key_filepath`` to enable SSL certificate authentication (python 2.7 or 3.x).
+- na_ontap_disks - ``disk_type`` option allows to assign specified type of disk.
+- na_ontap_firmware_upgrade - ignore timeout when downloading image unless ``fail_on_502_error`` is set to true.
+- na_ontap_info - ``desired_attributes`` advanced feature to select which fields to return.
+- na_ontap_info - ``use_native_zapi_tags`` to disable the conversion of '_' to '-' for attribute keys.
+- na_ontap_pb_install_SSL_certificate.yml - playbook example - installing a self-signed SSL certificate, and enabling SSL certificate authentication.
+- na_ontap_rest_info - ``fields`` options to request specific fields from subset.
+- na_ontap_snapmirror - now performs restore with optional field ``source_snapshot`` for specific snapshot or uses latest.
+- na_ontap_software_update - ``stabilize_minutes`` option specifies number of minutes needed to stabilize node before update.
+- na_ontap_ucadapter - ``pair_adapters`` option allows specifying the list of adapters which also need to be offline.
+- na_ontap_user - ``authentication_password`` option specifies password for the authentication protocol of SNMPv3 user.
+- na_ontap_user - ``authentication_protocol`` option specifies authentication protocol for SNMPv3 user.
+- na_ontap_user - ``engine_id`` option specifies authoritative entity's EngineID for the SNMPv3 user.
+- na_ontap_user - ``privacy_password`` option specifies password for the privacy protocol of SNMPv3 user.
+- na_ontap_user - ``privacy_protocol`` option specifies privacy protocol of SNMPv3 user.
+- na_ontap_user - ``remote_switch_ipaddress`` option specifies the IP Address of the remote switch of SNMPv3 user.
+- na_ontap_user - added REST support for ONTAP user creation, modification & deletion.
+- na_ontap_volume - ``auto_remap_luns`` option controls automatic mapping of LUNs during volume rehost.
+- na_ontap_volume - ``check_interval`` option checks if a volume move has been completed and then waits this number of seconds before checking again.
+- na_ontap_volume - ``force_restore`` option forces volume to restore even if the volume has one or more newer Snapshot copies.
+- na_ontap_volume - ``force_unmap_luns`` option controls automatic unmapping of LUNs during volume rehost.
+- na_ontap_volume - ``from_vserver`` option allows volume rehost from one vserver to another.
+- na_ontap_volume - ``preserve_lun_ids`` option controls whether LUNs in the restored volume remain mapped with their identities preserved.
+- na_ontap_volume - ``snapshot_restore`` option specifies name of snapshot to restore from.
+
+Bugfixes
+--------
+
+- module_utils/netapp_module - cater for empty lists in get_modified_attributes().
+- module_utils/netapp_module - cater for lists with duplicate elements in compare_lists().
+- na_ontap_firmware_upgrade - ignore timeout when downloading firmware images by default.
+- na_ontap_info - conversion from '-' to '_' was not done for lists of dictionaries.
+- na_ontap_ntfs_dacl - example fix in documentation string.
+- na_ontap_snapmirror - could not delete all rules (bug in netapp_module).
+- na_ontap_volume - ``wait_on_completion`` is supported with volume moves.
+- na_ontap_volume - fix KeyError on 'style' when volume is of type - data-protection.
+- na_ontap_volume - modify was invoked multiple times when once is enough.
+
+v20.5.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_aggregate - ``raid_type`` options supports 'raid_0' for ONTAP Select.
+- na_ontap_cluster_config - role - Port Flowcontrol and autonegotiate can be set in role
+- na_ontap_cluster_peer - ``encryption_protocol_proposed`` option allows specifying encryption protocol to be used for inter-cluster communication.
+- na_ontap_info - new fact - aggr_efficiency_info.
+- na_ontap_info - new fact - cluster_switch_info.
+- na_ontap_info - new fact - disk_info.
+- na_ontap_info - new fact - env_sensors_info.
+- na_ontap_info - new fact - net_dev_discovery_info.
+- na_ontap_info - new fact - service_processor_info.
+- na_ontap_info - new fact - shelf_info.
+- na_ontap_info - new fact - sis_info.
+- na_ontap_info - new fact - subsys_health_info.
+- na_ontap_info - new fact - sys_cluster_alerts.
+- na_ontap_info - new fact - sysconfig_info.
+- na_ontap_info - new fact - volume_move_target_aggr_info.
+- na_ontap_info - new fact - volume_space_info.
+- na_ontap_nvme_namespace - ``block_size`` option allows specifying size in bytes of a logical block.
+- na_ontap_snapmirror - snapmirror now allows resume feature.
+- na_ontap_volume - ``cutover_action`` option allows specifying the action to be taken for cutover.
+
+Bugfixes
+--------
+
+- REST API call now honors the ``http_port`` parameter.
+- REST API detection now works with vserver (use_rest - Auto).
+- na_ontap_autosupport_invoke - when using ZAPI and name is not given, send autosupport message to all nodes in the cluster.
+- na_ontap_cg_snapshot - properly states it does not support check_mode.
+- na_ontap_cluster - ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster.
+- na_ontap_cluster_ha - support check_mode.
+- na_ontap_cluster_peer - EMS log wrongly uses destination credentials with source hostname.
+- na_ontap_cluster_peer - support check_mode.
+- na_ontap_disks - support check_mode.
+- na_ontap_dns - support check_mode.
+- na_ontap_efficiency_policy - change ``duration`` type from int to str to support '-' input.
+- na_ontap_fcp - support check_mode.
+- na_ontap_flexcache - support check_mode.
+- na_ontap_info - ``metrocluster_check_info`` does not trigger a traceback but adds an "error" info element if the target system is not set up for metrocluster.
+- na_ontap_license - support check_mode.
+- na_ontap_login_messages - fix documentation link.
+- na_ontap_node - support check mode.
+- na_ontap_ntfs_sd - documentation string update for examples and made sure owner or group not mandatory.
+- na_ontap_ports - now support check mode.
+- na_ontap_restit - error can be a string in addition to a dict. This fix removes a traceback with AttributeError.
+- na_ontap_routes - support Check Mode correctly.
+- na_ontap_snapmirror - support check_mode.
+- na_ontap_software_update - Incorrectly stated that it supports check_mode; it does not.
+- na_ontap_svm_options - support check_mode.
+- na_ontap_volume - fix KeyError on 'style' when volume is offline.
+- na_ontap_volume - improve error reporting if required parameter is present but not set.
+- na_ontap_volume - suppress traceback in wait_for_completion as volume may not be completely ready.
+- na_ontap_volume_autosize - Support check_mode when ``reset`` option is given.
+- na_ontap_volume_snaplock - fix documentation link.
+- na_ontap_vserver_peer - EMS log wrongly uses destination credentials with source hostname.
+- na_ontap_vserver_peer - support check_mode.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_rest_info - NetApp ONTAP information gatherer using REST APIs
+
+v20.4.1
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_autosupport_invoke - added REST support for sending autosupport message.
+- na_ontap_firmware_upgrade - ``force_disruptive_update`` and ``package_url`` options allow making choices for downloading and upgrading packages.
+- na_ontap_vserver_create has a new default variable ``netapp_version`` set to 140. If you are running 9.2 or below, please add the variable to your playbook and set it to 120.
+
+Bugfixes
+--------
+
+- na_ontap_info - ``metrocluster_check_info`` has been removed as it was breaking the info module for everyone who didn't have a metrocluster set up. We are working on adding this back in a future update.
+- na_ontap_volume - ``volume_security_style`` option now allows modify.
+
+v20.4.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_aggregate - ``disk_count`` option allows adding additional disk to aggregate.
+- na_ontap_info - ``max_records`` option specifies maximum number of records returned in a single ZAPI call.
+- na_ontap_info - ``summary`` option specifies a boolean flag to control return all or none of the info attributes.
+- na_ontap_info - new fact - iscsi_service_info.
+- na_ontap_info - new fact - license_info.
+- na_ontap_info - new fact - metrocluster_check_info.
+- na_ontap_info - new fact - metrocluster_info.
+- na_ontap_info - new fact - metrocluster_node_info.
+- na_ontap_info - new fact - net_interface_service_policy_info.
+- na_ontap_info - new fact - ontap_system_version.
+- na_ontap_info - new fact - ontapi_version (and deprecate ontap_version, both fields are reported for now).
+- na_ontap_info - new fact - qtree_info.
+- na_ontap_info - new fact - quota_report_info.
+- na_ontap_info - new fact - snapmirror_destination_info.
+- na_ontap_interface - ``service_policy`` option to identify a single service or a list of services that will use a LIF.
+- na_ontap_kerberos_realm - ``ad_server_ip`` option specifies IP Address of the Active Directory Domain Controller (DC).
+- na_ontap_kerberos_realm - ``ad_server_name`` option specifies Host name of the Active Directory Domain Controller (DC).
+- na_ontap_snapmirror - ``relationship-info-only`` option allows to manage relationship information.
+- na_ontap_snapmirror_policy - REST is included and all defaults are removed from options.
+- na_ontap_software_update - ``download_only`` options allows to download cluster image without software update.
+- na_ontap_volume - ``snapshot_auto_delete`` option allows to manage auto delete settings of a specified volume.
+
+Bugfixes
+--------
+
+- na_ontap_cifs_server - delete AD account if username and password are provided when state=absent
+- na_ontap_info - cifs_server_info - fix KeyError exception on ``domain`` if only ``domain-workgroup`` is present.
+- na_ontap_info - return all records of each gathered subset.
+- na_ontap_iscsi_security - Fixed modify functionality for CHAP and corrected a typo.
+- na_ontap_kerberos_realm - fix ``kdc_vendor`` case sensitivity issue.
+- na_ontap_snapmirror - calling quiesce before snapmirror break.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_autosupport_invoke - NetApp ONTAP send AutoSupport message
+- netapp.ontap.na_ontap_ntfs_dacl - NetApp Ontap create, delete or modify NTFS DACL (discretionary access control list)
+- netapp.ontap.na_ontap_ntfs_sd - NetApp ONTAP create, delete or modify NTFS security descriptor
+- netapp.ontap.na_ontap_restit - NetApp ONTAP Run any REST API on ONTAP
+- netapp.ontap.na_ontap_wwpn_alias - NetApp ONTAP set FCP WWPN Alias
+- netapp.ontap.na_ontap_zapit - NetApp ONTAP Run any ZAPI on ONTAP
+
+v20.3.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_info - New info added - ``storage_bridge_info``
+- na_ontap_info - New info added - ``cluster_identity_info``
+- na_ontap_snapmirror - performs resync when the ``relationship_state`` is active and the current state is broken-off.
+
+Bugfixes
+--------
+
+- na_ontap_volume_snaplock - Fixed KeyError exception on 'is-volume-append-mode-enabled'
+- na_ontap_vscan_scanner_pool - has been updated to match the standard format used for all other ontap modules
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_snapmirror_policy - NetApp ONTAP create, delete or modify SnapMirror policies
+- netapp.ontap.na_ontap_snmp_traphosts - NetApp ONTAP SNMP traphosts.
+
+v20.2.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_info - New info added - ``snapshot_info``
+- na_ontap_info - ``max_records`` option to set maximum number of records to return per subset.
+- na_ontap_nas_create - role - fix typo in README file, add CIFS example.
+- na_ontap_snapmirror - ``relationship_state`` option for breaking the snapmirror relationship.
+- na_ontap_snapmirror - ``update_snapmirror`` option for updating the snapmirror relationship.
+- na_ontap_volume_clone - ``split`` option to split clone volume from parent volume.
+
+Bugfixes
+--------
+
+- na_ontap_cifs_server - Fixed KeyError exception on 'cifs_server_name'
+- na_ontap_command - fixed traceback when using return_dict if u'1' is present in result value.
+- na_ontap_login_messages - Fixed example documentation and spelling mistake issue
+- na_ontap_nvme_subsystem - fixed bug when creating subsystem, vserver was not filtered.
+- na_ontap_qtree - Fixed issue with Get function for REST
+- na_ontap_svm - if language C.UTF-8 is specified, the module is not idempotent
+- na_ontap_svm - if snapshot policy is changed, modify fails with "Extra input - snapshot_policy"
+- na_ontap_volume_clone - fixed 'Extra input - parent-vserver' error when running as cluster admin.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_volume_snaplock - NetApp ONTAP manage volume snaplock retention.
+
+v20.1.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_aggregate - add ``snaplock_type``.
+- na_ontap_dns - added REST support for dns creation and modification on cluster vserver.
+- na_ontap_igroup_initiator - ``force_remove`` to forcibly remove initiators from an igroup that is currently mapped to a LUN.
+- na_ontap_info - New info added - ``cifs_server_info``, ``cifs_share_info``, ``cifs_vserver_security_info``, ``cluster_peer_info``, ``clock_info``, ``export_policy_info``, ``export_rule_info``, ``fcp_adapter_info``, ``fcp_alias_info``, ``fcp_service_info``, ``job_schedule_cron_info``, ``kerberos_realm_info``, ``ldap_client``, ``ldap_config``, ``net_failover_group_info``, ``net_firewall_info``, ``net_ipspaces_info``, ``net_port_broadcast_domain_info``, ``net_routes_info``, ``net_vlan_info``, ``nfs_info``, ``ntfs_dacl_info``, ``ntfs_sd_info``, ``ntp_server_info``, ``role_info``, ``service_processor_network_info``, ``sis_policy_info``, ``snapmirror_policy_info``, ``snapshot_policy_info``, ``vscan_info``, ``vserver_peer_info``
+- na_ontap_interface - ``failover_group`` to specify the failover group for the LIF. ``is_ipv4_link_local`` to specify that the LIFs are to acquire an ipv4 link local address.
+- na_ontap_rest_cli - add OPTIONS as a supported verb and return list of allowed verbs.
+- na_ontap_volume - add ``group_id`` and ``user_id``.
+
+Bugfixes
+--------
+
+- na_ontap_aggregate - Fixed traceback when running as vsadmin; now cleanly errors out.
+- na_ontap_command - stdout_lines_filter contains data only if include/exclude_lines parameter is used. (zeten30)
+- na_ontap_command - stripped_line len is checked only once, filters are inside if block. (zeten30)
+- na_ontap_interface - allow module to run on node before joining the cluster.
+- na_ontap_net_ifgrp - Fixed error for na_ontap_net_ifgrp if no port is given.
+- na_ontap_snapmirror - Fixed traceback when running as vsadmin. Do not attempt to break a relationship that is 'Uninitialized'.
+- na_ontap_snapshot_policy - Fixed KeyError on ``prefix`` issue when prefix parameter isn't supplied.
+- na_ontap_volume - Fixed error reporting if efficiency policy cannot be read. Do not attempt to read efficiency policy if not needed.
+- na_ontap_volume - Fixed error when modifying volume efficiency policy.
+- na_ontap_volume_clone - Fixed KeyError exception on ``volume``
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_login_messages - Setup login banner and message of the day
+
+v19.11.0
+========
+
+Minor Changes
+-------------
+
+- na_ontap_cluster - added single node cluster option; also now supports modifying the cluster contact and location options.
+- na_ontap_efficiency_policy - ``changelog_threshold_percent`` to set the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+- na_ontap_info - Added ``vscan_status_info``, ``vscan_scanner_pool_info``, ``vscan_connection_status_all_info``, ``vscan_connection_extended_stats_info``
+- na_ontap_info - Now allows you to use vsadmin to get info (must use the ``vserver`` option).
+
+Bugfixes
+--------
+
+- na_ontap_cluster - autosupport log pushed after cluster create is performed, removed license add or remove option.
+- na_ontap_dns - report error if modify or delete operations are attempted on cserver when using REST. Make create operation idempotent for cserver when using REST. Support for modify/delete on cserver when using REST will be added later.
+- na_ontap_firewall_policy - portmap added as a valid service
+- na_ontap_net_routes - REST does not support the ``metric`` attribute
+- na_ontap_snapmirror - added initialize boolean option which specifies whether to initialize SnapMirror relation.
+- na_ontap_volume - fixed error when deleting flexGroup volume with ONTAP 9.7.
+- na_ontap_volume - tiering option requires 9.4 or later (error on volume-comp-aggr-attributes)
+- na_ontap_vscan_scanner_pool - fix module only gets one scanner pool.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_quota_policy - NetApp Ontap create, rename or delete quota policy
+
+v19.10.1
+========
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_iscsi_security - NetApp ONTAP Manage iscsi security.
+
+v19.10.0
+========
+
+Minor Changes
+-------------
+
+- Added REST support to existing modules.
+ By default, the module will use REST if the target system supports it, and the options are supported. Otherwise, it will switch back to ZAPI.
+ This behavior can be controlled with the ``use_rest`` option.
+ Always - to force REST. The module fails and reports an error if REST cannot be used.
+ Never - to force ZAPI. This could be useful if you find some incompatibility with REST, or want to confirm the behavior is identical between REST and ZAPI.
+  Auto - the default, as described above. An illustrative example follows this list.
+- na_ontap_cluster_config - role updated to support a cleaner playbook
+- na_ontap_command - ``vserver`` - to allow command to run as either cluster admin or vserver admin. To run as vserver admin you must use the vserver option.
+- na_ontap_export_policy - REST support
+- na_ontap_ipspace - REST support
+- na_ontap_job_schedule - REST support
+- na_ontap_motd - rename ``message`` to ``motd_message`` to avoid conflict with Ansible internal variable name.
+- na_ontap_nas_create - role updated to support a cleaner playbook
+- na_ontap_ndmp - REST support - only ``enable`` and ``authtype`` are supported with REST
+- na_ontap_net_routes - REST support
+- na_ontap_nvme_namespace - ``size_unit`` to specify size in different units.
+- na_ontap_qtree - REST support - ``oplocks`` is not supported with REST, defaults to enable.
+- na_ontap_san_create - role updated to support a cleaner playbook
+- na_ontap_snapshot_policy - ``prefix`` - option to use for creating snapshot policy.
+- na_ontap_svm - REST support - ``root_volume``, ``root_volume_aggregate``, ``root_volume_security_style`` are not supported with REST.
+- na_ontap_vserver_create - role updated to support a cleaner playbook
+
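+As an illustration only (the module, names and values below are placeholders and not part of this changelog), ``use_rest`` is set like any other module option::
+
+    # illustrative sketch only - not tied to any specific change above
+    - name: create export policy, forcing REST
+      netapp.ontap.na_ontap_export_policy:
+        state: present
+        name: ansible_export_policy
+        vserver: ansible_vserver
+        use_rest: Always
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+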
+Bugfixes
+--------
+
+- na_ontap_net_routes - change metric type from string to int.
+- na_ontap_cifs_server - minor documentation changes - corrected the create example with the "name" parameter and added type to parameters.
+- na_ontap_firewall_policy - documentation changed for supported service parameter.
+- na_ontap_ndmp - minor documentation changes for restore_vm_cache_size and data_port_range.
+- na_ontap_net_subnet - fix ip_ranges option fails on existing subnet.
+- na_ontap_net_subnet - fix rename idempotency issue and updated rename check.
+- na_ontap_nvme_subsystem - fix fetching unique nvme subsystem based on vserver filter.
+- na_ontap_qtree - REST API takes "unix_permissions" as parameter instead of "mode".
+- na_ontap_qtree - unix permission is not available when security style is ntfs
+- na_ontap_snapshot_policy - fix vsadmin approach for managing snapshot policy.
+- na_ontap_svm - ``allowed_protocols`` is now passed properly as a parameter when using the REST API.
+- na_ontap_user - minor documentation update for application parameter.
+- na_ontap_volume - ``efficiency_policy`` was ignored
+- na_ontap_volume - enforce that space_slo and space_guarantee are mutually exclusive
+- na_ontap_vserver_cifs_security - fix int and boolean options when modifying vserver cifs security.
+
+v2.9.0
+======
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_efficiency_policy - NetApp ONTAP manage efficiency policies (sis policies)
+- netapp.ontap.na_ontap_firmware_upgrade - NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
+- netapp.ontap.na_ontap_info - NetApp information gatherer
+- netapp.ontap.na_ontap_ipspace - NetApp ONTAP Manage an ipspace
+- netapp.ontap.na_ontap_kerberos_realm - NetApp ONTAP vserver nfs kerberos realm
+- netapp.ontap.na_ontap_ldap - NetApp ONTAP LDAP
+- netapp.ontap.na_ontap_ldap_client - NetApp ONTAP LDAP client
+- netapp.ontap.na_ontap_ndmp - NetApp ONTAP NDMP services configuration
+- netapp.ontap.na_ontap_object_store - NetApp ONTAP manage object store config.
+- netapp.ontap.na_ontap_ports - NetApp ONTAP add/remove ports
+- netapp.ontap.na_ontap_qos_adaptive_policy_group - NetApp ONTAP Adaptive Quality of Service policy group.
+- netapp.ontap.na_ontap_rest_cli - NetApp ONTAP Run any cli command, the username provided needs to have console login permission.
+- netapp.ontap.na_ontap_volume_autosize - NetApp ONTAP manage volume autosize
+- netapp.ontap.na_ontap_vscan - NetApp ONTAP Vscan enable/disable.
+- netapp.ontap.na_ontap_vserver_cifs_security - NetApp ONTAP vserver CIFS security modification
+
+v2.8.0
+======
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_flexcache - NetApp ONTAP FlexCache - create/delete relationship
+- netapp.ontap.na_ontap_igroup_initiator - NetApp ONTAP igroup initiator configuration
+- netapp.ontap.na_ontap_lun_copy - NetApp ONTAP copy LUNs
+- netapp.ontap.na_ontap_net_subnet - NetApp ONTAP Create, delete, modify network subnets.
+- netapp.ontap.na_ontap_nvme - NetApp ONTAP Manage NVMe Service
+- netapp.ontap.na_ontap_nvme_namespace - NetApp ONTAP Manage NVME Namespace
+- netapp.ontap.na_ontap_nvme_subsystem - NetApp ONTAP Manage NVME Subsystem
+- netapp.ontap.na_ontap_portset - NetApp ONTAP Create/Delete portset
+- netapp.ontap.na_ontap_qos_policy_group - NetApp ONTAP manage policy group in Quality of Service.
+- netapp.ontap.na_ontap_quotas - NetApp ONTAP Quotas
+- netapp.ontap.na_ontap_security_key_manager - NetApp ONTAP security key manager.
+- netapp.ontap.na_ontap_snapshot_policy - NetApp ONTAP manage Snapshot Policy
+- netapp.ontap.na_ontap_unix_group - NetApp ONTAP UNIX Group
+- netapp.ontap.na_ontap_unix_user - NetApp ONTAP UNIX users
+- netapp.ontap.na_ontap_vscan_on_access_policy - NetApp ONTAP Vscan on access policy configuration.
+- netapp.ontap.na_ontap_vscan_on_demand_task - NetApp ONTAP Vscan on demand task configuration.
+- netapp.ontap.na_ontap_vscan_scanner_pool - NetApp ONTAP Vscan Scanner Pools Configuration.
+
+v2.7.0
+======
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_autosupport - NetApp ONTAP Autosupport
+- netapp.ontap.na_ontap_cg_snapshot - NetApp ONTAP manage consistency group snapshot
+- netapp.ontap.na_ontap_cluster_peer - NetApp ONTAP Manage Cluster peering
+- netapp.ontap.na_ontap_command - NetApp ONTAP Run any cli command, the username provided needs to have console login permission.
+- netapp.ontap.na_ontap_disks - NetApp ONTAP Assign disks to nodes
+- netapp.ontap.na_ontap_dns - NetApp ONTAP Create, delete, modify DNS servers.
+- netapp.ontap.na_ontap_fcp - NetApp ONTAP Start, Stop and Enable FCP services.
+- netapp.ontap.na_ontap_firewall_policy - NetApp ONTAP Manage a firewall policy
+- netapp.ontap.na_ontap_motd - Setup motd
+- netapp.ontap.na_ontap_node - NetApp ONTAP Rename a node.
+- netapp.ontap.na_ontap_snapmirror - NetApp ONTAP or ElementSW Manage SnapMirror
+- netapp.ontap.na_ontap_software_update - NetApp ONTAP Update Software
+- netapp.ontap.na_ontap_svm_options - NetApp ONTAP Modify SVM Options
+- netapp.ontap.na_ontap_vserver_peer - NetApp ONTAP Vserver peering
+
+v2.6.0
+======
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_aggregate - NetApp ONTAP manage aggregates.
+- netapp.ontap.na_ontap_broadcast_domain - NetApp ONTAP manage broadcast domains.
+- netapp.ontap.na_ontap_broadcast_domain_ports - NetApp ONTAP manage broadcast domain ports
+- netapp.ontap.na_ontap_cifs - NetApp ONTAP Manage cifs-share
+- netapp.ontap.na_ontap_cifs_acl - NetApp ONTAP manage cifs-share-access-control
+- netapp.ontap.na_ontap_cifs_server - NetApp ONTAP CIFS server configuration
+- netapp.ontap.na_ontap_cluster - NetApp ONTAP cluster - create a cluster and add/remove nodes.
+- netapp.ontap.na_ontap_cluster_ha - NetApp ONTAP Manage HA status for cluster
+- netapp.ontap.na_ontap_export_policy - NetApp ONTAP manage export-policy
+- netapp.ontap.na_ontap_export_policy_rule - NetApp ONTAP manage export policy rules
+- netapp.ontap.na_ontap_igroup - NetApp ONTAP iSCSI or FC igroup configuration
+- netapp.ontap.na_ontap_interface - NetApp ONTAP LIF configuration
+- netapp.ontap.na_ontap_iscsi - NetApp ONTAP manage iSCSI service
+- netapp.ontap.na_ontap_job_schedule - NetApp ONTAP Job Schedule
+- netapp.ontap.na_ontap_license - NetApp ONTAP protocol and feature licenses
+- netapp.ontap.na_ontap_lun - NetApp ONTAP manage LUNs
+- netapp.ontap.na_ontap_lun_map - NetApp ONTAP LUN maps
+- netapp.ontap.na_ontap_net_ifgrp - NetApp ONTAP modify network interface group
+- netapp.ontap.na_ontap_net_port - NetApp ONTAP network ports.
+- netapp.ontap.na_ontap_net_routes - NetApp ONTAP network routes
+- netapp.ontap.na_ontap_net_vlan - NetApp ONTAP network VLAN
+- netapp.ontap.na_ontap_nfs - NetApp ONTAP NFS status
+- netapp.ontap.na_ontap_ntp - NetApp ONTAP NTP server
+- netapp.ontap.na_ontap_qtree - NetApp ONTAP manage qtrees
+- netapp.ontap.na_ontap_service_processor_network - NetApp ONTAP service processor network
+- netapp.ontap.na_ontap_snapshot - NetApp ONTAP manage Snapshots
+- netapp.ontap.na_ontap_snmp - NetApp ONTAP SNMP community
+- netapp.ontap.na_ontap_svm - NetApp ONTAP SVM
+- netapp.ontap.na_ontap_ucadapter - NetApp ONTAP UC adapter configuration
+- netapp.ontap.na_ontap_user - NetApp ONTAP user configuration and management
+- netapp.ontap.na_ontap_user_role - NetApp ONTAP user role configuration and management
+- netapp.ontap.na_ontap_volume - NetApp ONTAP manage volumes (usage sketch after this list).
+- netapp.ontap.na_ontap_volume_clone - NetApp ONTAP manage volume clones.
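
The changelog entries above list only module names and one-line summaries. As a rough illustration, a single task using netapp.ontap.na_ontap_volume (added in v2.6.0) might look like the sketch below; the hostname, credential, SVM, aggregate, and size values are placeholders rather than values taken from this collection.

    - name: Create a 10 GB volume on an existing SVM (illustrative values only)
      netapp.ontap.na_ontap_volume:
        state: present
        name: ansible_vol                  # placeholder volume name
        vserver: svm1                      # placeholder SVM (vserver)
        aggregate_name: aggr1              # placeholder aggregate
        size: 10
        size_unit: gb
        hostname: "{{ netapp_hostname }}"  # placeholder connection variables
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
        https: true
        validate_certs: false

The connection options (hostname, username, password, https, validate_certs) come from the collection's shared netapp doc fragment, so the same pattern applies to the other na_ontap_* modules listed above.
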
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/FILES.json b/collections-debian-merged/ansible_collections/netapp/ontap/FILES.json
new file mode 100644
index 00000000..4d3a4daf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/FILES.json
@@ -0,0 +1,2546 @@
+{
+ "files": [
+ {
+ "format": 1,
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": ".",
+ "chksum_type": null
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/doc_fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "622dfbf7c4072b021ff59f4bd6a709fe8d6841dfcc42e31b513737e0674fa392",
+ "name": "plugins/doc_fragments/netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/module_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fc2f779d8618d23a6b851685806c9c62e7c35010830a0ba1ae51e197cf126cb1",
+ "name": "plugins/module_utils/netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b41ebaef8e0bcfdf28b53bc5845b3846353475482c25e7a55112d6e2bfdfe3a2",
+ "name": "plugins/module_utils/rest_response_helpers.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "17f95741c521df69dd02816e4b361644698a94879af1fc3b3568a78256f178f0",
+ "name": "plugins/module_utils/zapis_svm.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "29fcc9f2d68af9077aeb7d1d62ba6de244f0ad8d93371cf70f4b83ad909b65cb",
+ "name": "plugins/module_utils/netapp_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "682b84cc6f603ee6f6bf73bcb713830fb9d0326095bc36e2d24a12b5a2c22710",
+ "name": "plugins/module_utils/netapp_elementsw_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8c25f79b27de0714f29255f2611aa0a07a79276ae8e0f44c8388ae75e92c16e5",
+ "name": "plugins/module_utils/rest_application.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0504cae80ee47b71be42d82e3567d992ce7ec434a5d6c625ed8d827aff82cb35",
+ "name": "plugins/modules/na_ontap_job_schedule.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "29ae0f0bb39e71a6b6e33640e8013c59f8b468ca44cf8893bdcd66ad1b15ea92",
+ "name": "plugins/modules/na_ontap_ntfs_dacl.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9601ad6efd3f07e5ea4e0fb460188b099ca98d84cebd8ccbde24d792cd46d247",
+ "name": "plugins/modules/na_ontap_efficiency_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e68f96b0364438c34b4a01bc74c9d197ea4e2afc5f982e2de81b36efafa33f64",
+ "name": "plugins/modules/na_ontap_quota_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0de48dd6b0c4197400fa624e879fc7f217594bd9bd94b1b61a78e26bb7c40482",
+ "name": "plugins/modules/na_ontap_flexcache.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f7c21d2b18c1b02c647c44a542c6ae7944d5a0d154f072215536d7f45fa60fea",
+ "name": "plugins/modules/na_ontap_service_processor_network.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f86c9f4b45b9184b289a678ee966115279f44ccd0e2c284e9b22b8eb5a5d3ddc",
+ "name": "plugins/modules/na_ontap_snapmirror_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e28c43cf423a1cdb71c4d5cf9a329aa0885077489e610504d638f0a3e234c59f",
+ "name": "plugins/modules/na_ontap_vscan_on_demand_task.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "48bb48d36fafdaa302c423910e429cb23e3e70ce1fc872669815f36c78f0b6a7",
+ "name": "plugins/modules/na_ontap_dns.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6e4bf554c1844d4ac8987d50c51e421a54b6bf2af857e333c3c2993f8004ea17",
+ "name": "plugins/modules/na_ontap_cifs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "668d6edf8bb293796f48cbd77f5637082ec8d4bafd9b08d6e99c3c05498ff962",
+ "name": "plugins/modules/na_ontap_volume.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "bc4a15cfb66f06b9b9f302d29f9e7e6bda68ac3ec93e051366fa4cf4e36cef03",
+ "name": "plugins/modules/na_ontap_export_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ae642e26def416d9305d6fa18c9fdc66d2d5134f87889c76689212cdb35fa36a",
+ "name": "plugins/modules/na_ontap_metrocluster_dr_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3c51804efc6e8b6f51fa09fdff437417eaf80b5f70c23ae604b23ad46963e6f",
+ "name": "plugins/modules/na_ontap_ports.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7dcabbbae88acf33ffbc4bcde862a4cd21149e1e666f4f710de637d5d8007b02",
+ "name": "plugins/modules/na_ontap_restit.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e65e283de780b53054e20fa761d85028558e5edaf689877db84e33107be1bdb4",
+ "name": "plugins/modules/na_ontap_volume_clone.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e417baadd25b96dbed77e00bd0869066bc46cfdef3f3c0c5b15b335cf7861206",
+ "name": "plugins/modules/na_ontap_volume_snaplock.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6532ccee7d7e562d2484830a31521d8e99a40b5b60372a98cc1ff46fdcefa062",
+ "name": "plugins/modules/na_ontap_net_routes.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "20905de25adb5cf88307c6c14c39086823141f69e3d0699e5172dcae63d1e21c",
+ "name": "plugins/modules/na_ontap_wait_for_condition.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "97a68b7de310f6df215e9bb4d5c5d79942e0ccf1c9706ccd414e711b4777a765",
+ "name": "plugins/modules/na_ontap_nvme.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4bd735d872daf17a7c0a3fe95b84b9495705b0e071edef872d1a813dc7af58f3",
+ "name": "plugins/modules/na_ontap_net_port.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8adbc4c39558ec3a3d7aa4a064874db22ddb10e0d32600d80065a8b012e5ce42",
+ "name": "plugins/modules/na_ontap_lun_map.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "416a23c9befc35a96c74d6d66b2941e68d0b302a03a09af7d69ba4abd28b2edf",
+ "name": "plugins/modules/na_ontap_user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3212a82874157ad061023002eba71d60c3eff88dd0971f958c155cfcc4dca3e5",
+ "name": "plugins/modules/na_ontap_qos_adaptive_policy_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fb09430ff998116a932cf463fc16ba752c82618492538d160eb548f04201e19d",
+ "name": "plugins/modules/na_ontap_security_certificates.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7fd84fa8d194ed363cb8324bb93c534faf4fc1fab02598651acb903fd4fc81df",
+ "name": "plugins/modules/na_ontap_autosupport_invoke.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "67607676e007ff293e57b2cac0fc0383becac819a06742ec03f8179171b4e554",
+ "name": "plugins/modules/na_ontap_quotas.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3b06cf4b098f63b92bbb3933b47ea1d1b533347238dbad52050bc62e65a9e2ad",
+ "name": "plugins/modules/na_ontap_mcc_mediator.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "02bcd185b7014ea9172780881566585997f1db9d5f9af40824b36a4d9f567f7d",
+ "name": "plugins/modules/na_ontap_unix_user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3eba7898bc62496ec1858c5f3d5d5b95ecf28ae8a54d9045eda78f8169b9782a",
+ "name": "plugins/modules/na_ontap_wwpn_alias.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7c4e6f6c1777e75fe96da2fe152b2b965d68cc144637693a4cebc183207d857e",
+ "name": "plugins/modules/na_ontap_iscsi_security.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c1d0c33de4d20a9e8e8af55ce71ac9f2459124a3e31f08d34670a45f473239b2",
+ "name": "plugins/modules/na_ontap_snapmirror.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e9bdf43cfbd26d7ae0145e99ded7872de950f743d3238de99db1a04dd5dc1a8d",
+ "name": "plugins/modules/na_ontap_command.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4bc0daf94833a70481940d0520ba150d9f2d91d0160a1e5adebadc0f120a05f3",
+ "name": "plugins/modules/na_ontap_snmp_traphosts.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "713b26b7e5acd7646b094af208deb67d1bc2c24c77875c784861278a4e40143b",
+ "name": "plugins/modules/na_ontap_snapshot_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "aee2103994affa8863aae82e48d52ec3c3a3d165399b3e3b2cfef522561c8922",
+ "name": "plugins/modules/na_ontap_nvme_subsystem.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "08aca6e703b1b1bb010b28f244c710f96d28db76dfbbacb9bf75943bec39e0db",
+ "name": "plugins/modules/na_ontap_ntp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c8b4ba95fd5281f1c43ca89d1b7e5ecdf232b3d7540c9618c36b17a94c766be6",
+ "name": "plugins/modules/na_ontap_ntfs_sd.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d0a98516a0e575d93e57e362a0af948c93e9417d34e1e2899d48f16619630404",
+ "name": "plugins/modules/na_ontap_lun.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "95780a330f756d1337d9704ebd4839b37d32423e1a9c8a4475a542c9d5f25e4e",
+ "name": "plugins/modules/na_ontap_zapit.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b73c1867760f03c85e2f1ed5ba7b977f51783cbdb557b5866a964a8468f91872",
+ "name": "plugins/modules/na_ontap_ldap.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0fa7dbe2b7a7288c5baad791759262da75bf21eef8a2a14395b53b0c3e2be35b",
+ "name": "plugins/modules/na_ontap_ldap_client.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "48e1b103c997be845fbb7205763d19dfa86b275ca7b5178b153fd1a0ae31c7d5",
+ "name": "plugins/modules/na_ontap_metrocluster.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b53e05b6002f49b6eb73ec672eea8363856160d3d43e77338feff56208da6033",
+ "name": "plugins/modules/na_ontap_net_vlan.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3b630dbb18753a62ceea3ba50517433964f2b58499a29d5c398f7bbbb6cbff56",
+ "name": "plugins/modules/na_ontap_cifs_server.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8251fa178caeb48b44e4410275ade03e2b6527e46204edf699b6bc59e351196f",
+ "name": "plugins/modules/na_ontap_ipspace.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "30318d2247d12837af970f38722f22a8c468d45b43a0717c0d4de0b9f7880fe5",
+ "name": "plugins/modules/na_ontap_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e7fa1841152108d7187716de575c6616d12982af8c8e99111d8d953d45509ff8",
+ "name": "plugins/modules/na_ontap_rest_cli.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d78612443a974f2dfef39cba4fa79b9167240660307ec9e4bf6cb0f83027c202",
+ "name": "plugins/modules/na_ontap_unix_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ecddcc49f1a80c019737f8a4c56a83b989c2c97a9c52ca059b31ec3cf786cb52",
+ "name": "plugins/modules/na_ontap_ssh_command.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "15d95381f09c176d070e473b8250354f9bb71642adfb681885358219dfc3cd8e",
+ "name": "plugins/modules/na_ontap_snapshot.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "389df2ce0c3241e5690af363df56b01fd7b5569c1eeea923604c92f20465ede3",
+ "name": "plugins/modules/na_ontap_login_messages.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "cb8ace0315f27c7098c3988a30c827e3d45d82e97fe387c25b4a7d81809c1d71",
+ "name": "plugins/modules/na_ontap_vserver_cifs_security.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "34020c64d4eb953430f53f4572e797629080182e8fbf71f7eeed54f25d947b61",
+ "name": "plugins/modules/na_ontap_ndmp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a83da81f69464212202b9be3f904347802bc4afd32e9ae5500e538d81c3c710f",
+ "name": "plugins/modules/na_ontap_object_store.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "649e9fc92ede91192819756e4a6626f2e6bdca8f6102319fe8b143765f78bcfd",
+ "name": "plugins/modules/na_ontap_qos_policy_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8f49441fcb7ed6f8c1ddb42533c75b38244a6174a87ffb9676799a25d16e283d",
+ "name": "plugins/modules/na_ontap_node.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "56c0c6c7950533e2e97f76200105e1a6d1a532acc8d89a954307994e64d48d16",
+ "name": "plugins/modules/na_ontap_cifs_acl.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "98fd35675e66897ce0bc6bc0c2d39e1df8bf7b4cd6d2cdf058ec6d1228122c3e",
+ "name": "plugins/modules/na_ontap_cluster_peer.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9590c131eefbe14e2e04c55a7fd1b70d6480a6c92d07e42f92930268bd5d5a2e",
+ "name": "plugins/modules/na_ontap_vscan.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0047e258dbdf9db3ba948978b37afaa70cf148a2e73f42cc9be984ce2cf84900",
+ "name": "plugins/modules/na_ontap_nfs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8ac202fe01db2d30f46365294d86c21372fd346da817aca34b03a8f17a65194c",
+ "name": "plugins/modules/na_ontap_igroup_initiator.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e4facbc11a770969dc1547da240cf70ab7428634df002c8ab506668b46ee2d95",
+ "name": "plugins/modules/na_ontap_svm_options.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "532e66bf9cd7299cdb0c932321726027721077828b0cdee51394cc6a96225e76",
+ "name": "plugins/modules/na_ontap_broadcast_domain.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "935d9a82a52859f6e883b7de553e30316e7400c907a2a408aa24f6ff8e27b075",
+ "name": "plugins/modules/na_ontap_volume_autosize.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "506d0782ba8fb2365c0b5062184fe56a7f076af03988717a9ebfe0f3c615c8ba",
+ "name": "plugins/modules/na_ontap_iscsi.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c2b015ab45c16197be6bd1749b0fab92c11189edfdfdacccedc9315a0c13cfaf",
+ "name": "plugins/modules/na_ontap_interface.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "760a41718d297dcb4e1655cc47a27597a5e0aeeb87d5cf62d2c9e7ae4c10ac75",
+ "name": "plugins/modules/na_ontap_firmware_upgrade.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f9c0977c3872c025d54d84b1b8779cab004405db94ce77b011501cca662f6ae0",
+ "name": "plugins/modules/na_ontap_motd.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "de0a54608065ddc667705fc0575571db2edf27d63ecce947310a23bbbefe3aeb",
+ "name": "plugins/modules/na_ontap_svm.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "759f0d04ba0ca17f4db223832829c3ecc7f0e4af685bc1f0d9128be0e85c31f0",
+ "name": "plugins/modules/na_ontap_cg_snapshot.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "54ceb31d536dec8e84fbb2c705cfb09d3b33be63a64edc586248d20e985e5aa8",
+ "name": "plugins/modules/na_ontap_name_service_switch.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c2592526e21e035fec54c27b21796527021db922e456f3e46cc2feef30efd8ac",
+ "name": "plugins/modules/na_ontap_qtree.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "406f59959e7ed12e4c694df2afce540d66edb5b9e1a5c165301876ce05d55980",
+ "name": "plugins/modules/na_ontap_vscan_scanner_pool.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4541374a701ecf4f6c2f76586d6fcb578845fbcc3bf172ca0c7fedb3744071d3",
+ "name": "plugins/modules/na_ontap_firewall_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "46b7f497662947e9efb3a8a771a90980ba518d28557deb671a252cf60b22ba44",
+ "name": "plugins/modules/na_ontap_cluster.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "efec58f00004492025a4ca14d1f907de990141faf70292a80dd92e597807e3bd",
+ "name": "plugins/modules/na_ontap_security_key_manager.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "35cf27dabdf7335084f8b51ae02eca65f5390033e577686cf09917d555fedfc3",
+ "name": "plugins/modules/na_ontap_net_subnet.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f1136055eaf8510bbd33d5b0c0dbaa8852b1855a1087d16ebc355daf5b718741",
+ "name": "plugins/modules/na_ontap_autosupport.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9bab5e5cf55ddf734a0a1db2bfe86b60f47bca92b53682e1c48dc17a8b9f62b3",
+ "name": "plugins/modules/na_ontap_portset.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a5cf3a22af5d4dd963ca7b31db28ed687263b763c28d56183d739e3f372e6843",
+ "name": "plugins/modules/na_ontap_rest_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c1205abf4fc4adeeec062b617906214f8f6db4a81c938ad4eb91aeb24e334785",
+ "name": "plugins/modules/na_ontap_ucadapter.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "500dafb551c6e86773812362f693850cae02d8a3bac82e7eadd7388f2fd2e3fa",
+ "name": "plugins/modules/na_ontap_broadcast_domain_ports.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "708fff6e32e0e07218a31017a10870396c004412f172ae1a53d6453bacef3837",
+ "name": "plugins/modules/na_ontap_export_policy_rule.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "05066abd100446f3b226d9e4e5020d5e28bdc66ab38061d717f9e5318d07c6fc",
+ "name": "plugins/modules/na_ontap_igroup.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "93205fc791bbceb3b100fefc6447ded7397b5366e4f0504c234894a0d3ef9856",
+ "name": "plugins/modules/na_ontap_net_ifgrp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "76a35e8e675d2fcc66184e4a88811de82190c5b172f43c1b1e0a36a5c4cfde72",
+ "name": "plugins/modules/na_ontap_software_update.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c25f6e90673a70801372f33cff9260a774e34f34e771636b755e990505b012fc",
+ "name": "plugins/modules/na_ontap_kerberos_realm.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e73d600f53c07f8d49d21da315a0c14e5401fe714afd29772ce2e609e23284c6",
+ "name": "plugins/modules/na_ontap_nvme_namespace.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0717ce726b3c8074260d97528ccb351328200b5f916c27e945cd8ea79f239b96",
+ "name": "plugins/modules/na_ontap_vscan_on_access_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6da0207fd035e1ec95b7d20d112db5cdf061a8b532a45c5358a8f304d7bec91e",
+ "name": "plugins/modules/na_ontap_vserver_peer.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d8cd24b943a3479f0b5ad418dd52f9ed38d11e5a24102e2f3590b983515a251c",
+ "name": "plugins/modules/na_ontap_lun_copy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "44c49d0c9321c3cb0d218266a9aabfaa8573fdfbff8c091aeed8ac69521239c4",
+ "name": "plugins/modules/na_ontap_license.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a19f87688b762c5138d79edb8b378d1f0d10910d17b665307efceb4f4cf31fa7",
+ "name": "plugins/modules/na_ontap_snmp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5492b481410c0cbf0de653854d279b9470e5d54b208bf3e224294ef15af56f97",
+ "name": "plugins/modules/na_ontap_file_directory_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "99063dd311587b62ec9ca52c3e61b543e0fdf31a46254fd1dda6e94d1d03a1c2",
+ "name": "plugins/modules/na_ontap_disks.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b9c5d07e1b459d3b4f36a621cd120d461b3d7c378d2453a5e8413c7f7bea42b8",
+ "name": "plugins/modules/na_ontap_aggregate.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "da5a1d62c071c8c3e0d5d599584478b8e7b29ecf26c41ba5f61c35ff0f64a06e",
+ "name": "plugins/modules/na_ontap_fcp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a4584f391ed4d30a66683aef8ad74719a5faecf69ecacd86a298fe40a2bded0e",
+ "name": "plugins/modules/na_ontap_user_role.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2e8bb36dc5638fe099f828a4ef5a6049074ead81325b735bb5fc3220cefec9df",
+ "name": "plugins/modules/na_ontap_cluster_ha.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c39278365469670acfe32eaccd78799f3f3ad24407b0b90ff18a0c3f7b179c45",
+ "name": "plugins/modules/na_ontap_active_directory.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/compat",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096",
+ "name": "tests/unit/compat/unittest.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
+ "name": "tests/unit/compat/builtins.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "tests/unit/compat/__init__.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "name": "tests/unit/compat/mock.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "57288e7aa827906853f0bc6f29bebf2ee82e6415145b5fb44f3eb80ba5019977",
+ "name": "tests/unit/requirements.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins/module_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e4b08fb79b0a36048ea0efca6e0bcde8a1d6b682509772bb61bf16ec030d3d9d",
+ "name": "tests/unit/plugins/module_utils/test_netapp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2898d22d49f6759e7abfc8d00912c2a74c614fc4b2b2a4137f8694ed47f65759",
+ "name": "tests/unit/plugins/module_utils/test_netapp_module.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/unit/plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8a8c7e69a1ca8b34f00ebd87608a5011a130bfa82b5899493ab4b182ce76aaa4",
+ "name": "tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "561dfe182c0da0627fcff9ed229db65c246f57025db144a2a035c4e7c6d791c9",
+ "name": "tests/unit/plugins/modules/test_na_ontap_rest_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2398d2271f5a5defc9e6da8bced4da67f0204fb755ccc48f94e94947979ae531",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ucadapter.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ee93cc963613c240ed315490bebe63218752f7c1ff258b24ba2d56b9c50e9bb8",
+ "name": "tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "48b22e8ad4d419dc3cf41cac8c2b3be111ee6410bd9113375e65bc0d417e9b1d",
+ "name": "tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0f8a6988f9aa1db30a85f35872741bdde7b18af67025e725131efa1b2a3aa2f3",
+ "name": "tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e726a14b5d1567d30b6ca98cf4e97cf9cb3c28f13509573a6c28850b76d255c7",
+ "name": "tests/unit/plugins/modules/test_na_ontap_name_service_switch.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0a173264fb6243d4985336dec0a46fffd527f0f9a154b9f62f996392eecf7994",
+ "name": "tests/unit/plugins/modules/test_na_ontap_service_processor_network.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "666a91b8e4a4635042017dea916e01fb8e1df4b52cfec644a0d75f41d63ff85d",
+ "name": "tests/unit/plugins/modules/test_na_ontap_snapshot.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "746f82b699a08659065969a3daa10cb0da81043ccf4346c25585af660a728c1b",
+ "name": "tests/unit/plugins/modules/test_na_ontap_rest_cli.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1707cfbc1093d823266a365fe94d172ac2b6f657df0de64789c1888b460da3f5",
+ "name": "tests/unit/plugins/modules/test_na_ontap_software_update.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fc0a4c41218130b1893e805a63a34e576b1d6e317a311b2e272d50502b9b5f58",
+ "name": "tests/unit/plugins/modules/test_na_ontap_iscsi_security.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f7de9eb0347daeb93677dcd17700f5b397267c184ca8f4dd94a0a8eee305393d",
+ "name": "tests/unit/plugins/modules/test_na_ontap_cluster.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "78b136580c8838e722e18b472d633acad19238ab9cd8f3c19bb7f08c194a0215",
+ "name": "tests/unit/plugins/modules/test_na_ontap_svm.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "cd3afc86052595c5a72d643ded96aa57ba7263cce63674bd064f27b18e04f315",
+ "name": "tests/unit/plugins/modules/test_na_ontap_qtree.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0581cda91ae035b55568c272c62df71d3f908a018519f0ba83e564cb28443e90",
+ "name": "tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e35c2da73aa7a3e3cd9a523b9419e2b50feebeb6b180703b4f578b163f9eb4cf",
+ "name": "tests/unit/plugins/modules/test_na_ontap_cifs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "13f339a8ca57d67796de3c5a21aeca85713841066a83f6d414dfd1328c8eaa3f",
+ "name": "tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5c1abf8577e5fde12b405eb93a2efe84a9198f82c0d5a51ecffd55eff68e4edc",
+ "name": "tests/unit/plugins/modules/test_na_ontap_portset.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9e022ceaf1e6a331018f396023e79dab0f0d849fa14b516e2338738b989fd2b7",
+ "name": "tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4e051f1161f400635f514c8bfbc7e3076a2f4abbaf655f55195accaab2742e62",
+ "name": "tests/unit/plugins/modules/test_na_ontap_aggregate.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f2940f45a472849b4092de7716118aa493a6e3f1c3ed013fb939e4b3eff49d8f",
+ "name": "tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3bbc3f13520836f9b337ce81a549f7b9f1bbc3d9947feff423b103ea121b99cd",
+ "name": "tests/unit/plugins/modules/test_na_ontap_user_role.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8c83e5fe37ae9cf16cabd7abfc40ffd2a0ba4fabf4fdce4583a4da32bf614053",
+ "name": "tests/unit/plugins/modules/test_na_ontap_cifs_server.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "cebe54d9f85fcd6a9c90623ddfbf8ce66860bdb7b5fff5fb50ea9c72890a43bb",
+ "name": "tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "abe01c105569a66cb2ce77168d09e1fce228aca9959235017536dee5d035c53d",
+ "name": "tests/unit/plugins/modules/test_na_ontap_vserver_peer.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7d4ae84b9d2c8c76423607f460b2a51dfeb6e06a237749f5f92ff944a3602318",
+ "name": "tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0781ce01d7073a5b02dbf46eeb01deb3293ffce886e1d9a3a0583af739c56fd6",
+ "name": "tests/unit/plugins/modules/test_na_ontap_firewall_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "dd683d2b9d4ca9fe88dc6e0c6a9a1e89d3671ad250eb8d649cf2577537424cff",
+ "name": "tests/unit/plugins/modules/test_na_ontap_snapmirror.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1e09b96ad343eb8894175798429dc9dc3b54d148efb963a7efb8285832b3c682",
+ "name": "tests/unit/plugins/modules/test_na_ontap_cluster_peer.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "aff2f7a481da7fb1dbe743f0193ba06eaf8111ef515b56056c054cbb8352f4bc",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ipspace.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b226c9d9b269b6a92a086e95617a6a398521db0340db7c97951e31329a9296dd",
+ "name": "tests/unit/plugins/modules/test_na_ontap_object_store.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3e41dce8d7e809ceecde997fef4f90a92946d29f50ceca0bcdfaaa9a19ccddb9",
+ "name": "tests/unit/plugins/modules/test_na_ontap_template.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "19cf62313f5669704b4f2b424dd1b2b567302d273d05ba1926b1c1173d4d2ab5",
+ "name": "tests/unit/plugins/modules/test_na_ontap_volume_autosize.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a99c83ae8cd0d0fa73a7feb6db9bc02edb24b84173554c6a7af4d63385b6cb92",
+ "name": "tests/unit/plugins/modules/test_na_ontap_security_key_manager.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e381955f8ad931eb41df6a88da11af0021c59a676e715938d551901c5334e796",
+ "name": "tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3c1d3e8651ff5dbff173752effb1817db95533aa09f2ffb151326729da72e470",
+ "name": "tests/unit/plugins/modules/test_na_ontap_lun_rest.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6bbdeb821324c1b2abdd54ed88034ce4070a28c1703141e861542e1b77188a21",
+ "name": "tests/unit/plugins/modules/test_na_ontap_lun_copy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "98ba725d24ee101ff28ee5625ca4efaf72b7f7811de44d34aee576cc72070210",
+ "name": "tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "aad4e4fd1dbbba5cbc4967b4ad87d098361b470aa9f4173eb2fb5da17c83eda7",
+ "name": "tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2a1aeb9ff2ad410cb46599ebe27316882b54abe4577f9f3f2b79deda3daa5cb2",
+ "name": "tests/unit/plugins/modules/test_na_ontap_net_routes.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "52f4694b4fadca57595e9b5389752de90697fe01c99c12f5ac5290a6ef1606de",
+ "name": "tests/unit/plugins/modules/test_na_ontap_interface.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7c6dd59778f4a6ac9b9c45d7778ef5ed1007e589f3985d98105be42498e5a957",
+ "name": "tests/unit/plugins/modules/test_na_ontap_vscan.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "addf3c77b67c6dd7d6747c5be42f5a4fd25c323534263874be255c0213768042",
+ "name": "tests/unit/plugins/modules/test_na_ontap_autosupport.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f945fc6dae22d8a8a3ef01d5eba9c21e0681e551b86178c392f9240cc0766ccc",
+ "name": "tests/unit/plugins/modules/test_na_ontap_igroup.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "bd4d208ff3c3957a7b5b432810fa35e6a907c1a0b68fe4919570c0e432675c1f",
+ "name": "tests/unit/plugins/modules/test_na_ontap_nvme.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3f185493d14c0ff1427c4646f7c1f9cbc3448ded40b11dede103301b49faf6f1",
+ "name": "tests/unit/plugins/modules/test_na_ontap_nfs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "db6a4ef3df746d22e237783f6938ec0b4a5a3026cafb422bbf76d5b7b6963b23",
+ "name": "tests/unit/plugins/modules/test_na_ontap_user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "201b1cdc0f42eecfa8cf28ff24803062a360a8b81b86863eb1936d4c8f9fc33b",
+ "name": "tests/unit/plugins/modules/test_na_ontap_lun_map.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ee5a66ff4086f2f44229221ef198c1f8f5b1e38a24a312e5fd5ac46c27147b3b",
+ "name": "tests/unit/plugins/modules/test_na_ontap_volume.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c860ba0731f239bd551e912be5de15e5b6fdf0c34b4d17b3645ed120b625a3d3",
+ "name": "tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "109a79161f769ee814e1df3b0e4808146e20e23f2f60b4dfb3f72aa7da0d8281",
+ "name": "tests/unit/plugins/modules/test_na_ontap_net_subnet.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4579a358fab48e7f6fe90eec20d48be83717859eeb2db16f254737eda6287b72",
+ "name": "tests/unit/plugins/modules/test_na_ontap_unix_user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "008422a4188007f19a023eab29810f2af3c93dca1e7f604989b81f0efbf8d467",
+ "name": "tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "23cd08a6f4767e35b7d14cbe8aa5649725fd098578ee7a76db1ad337a3d4ea19",
+ "name": "tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "219b41f88a1d3a2aa3298694b1d4cd73e64d4a2c749693a73ce992c2c19fce2f",
+ "name": "tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9b19b41e58db980090310ed2f81eecc8d84da507f7ed9e236bb1919a53e633d6",
+ "name": "tests/unit/plugins/modules/test_na_ontap_export_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "472e753c5d5ef81968e0a31f1e88c679010874263afb0ae53079c232ffd43bfa",
+ "name": "tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "045bb466cf407235bfd1e7b0c2ae0adc1a4c070e089eb2b9317c9319fb6dfa4e",
+ "name": "tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2de698a2ac3c96ad4e318eae99508d5573260f0b403ca07a82c6613a511f626d",
+ "name": "tests/unit/plugins/modules/test_na_ontap_security_certificates.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ad2c270753668cb60c2f3aaf33eb6a7010a076f44cdd60cd8b579bd2e3f90ee6",
+ "name": "tests/unit/plugins/modules/test_na_ontap_metrocluster.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1aac5195217453639b39512537287592f1fe6e705b06138e9914f7e7aaf4c114",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ndmp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ff1eda14344fdf6bdcaf12cc713e4b0e0cc587775c4b7a56b61f69f9f932226c",
+ "name": "tests/unit/plugins/modules/test_na_ontap_command.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a9ebd2768e3f144705864657727770b862aa322b93b5ff253eb80428f07c4a59",
+ "name": "tests/unit/plugins/modules/test_na_ontap_lun.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d779f0a6f6820ce86f2d582bcd6619f97e1de156cd099ccd2033afb9e69b58bb",
+ "name": "tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "be85d0d4b8f3698cb9b13e80c58e2a3a0f1173542cfd93d99385cf787cd9aab8",
+ "name": "tests/unit/plugins/modules/test_na_ontap_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3515f35fabf745085c236a55e133b3a640761a06b5273752fe7e09ee8df7c9ee",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3515baca601a65c8989660bab6ef11e3928bfd4922960e64454e8bc58a21138e",
+ "name": "tests/unit/plugins/modules/test_na_ontap_volume_rest.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f1de8ec9f6a5cfbebfd35dfa513b177aea9aa6a88de1f12ecd81ad2ed9399a57",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "020f267f0c908b6db60310059aa9d37c0a106fee16523f0cdc17b92c4c0bb73d",
+ "name": "tests/unit/plugins/modules/test_na_ontap_net_port.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "73f9ff1640a9cea9e5757738dc91a4ab9d0d479652cf5d7edcbd3aa29824d3a7",
+ "name": "tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "66908ba2543f0eb2a97eca26d3164a1c617f3be81bbf5228e712881f0220393a",
+ "name": "tests/unit/plugins/modules/test_na_ontap_job_schedule.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b8907bbc3dc75d708519607e509418fff22a0dea3d40e90c24c648b1029c2ae9",
+ "name": "tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "45a41d5d7916461ffcf246baa47a311f16c2e63db0ca4e7fdd2841bb45705446",
+ "name": "tests/unit/plugins/modules/test_na_ontap_unix_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a8855f1a894b418d906192cf88a0e598383c924de6dc8ace53fe1f77ca07faa4",
+ "name": "tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "16360ce94bbb2eb2441616fb21cb5eb1dd4d68cc11cddba0cc1e20e1607d8d27",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ldap_client.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8ba1e9d3837910ed03402a335c5e8db4241d4d7c9792224934133c64135e8463",
+ "name": "tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8898454107ffcbd1a64a832c9f366534a32de2543b3fe833b9104c9b72256901",
+ "name": "tests/unit/plugins/modules/test_na_ontap_volume_clone.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "28f429a06711268655ded61baba287321f4ac57e16dd0ffae66d18d7c2c56d5f",
+ "name": "tests/unit/plugins/modules/test_na_ontap_motd.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c8bb65bec61aac484448604aff79a56ae26c04b1c2acd0e72d772b0d6ca824ab",
+ "name": "tests/unit/plugins/modules/test_na_ontap_quotas.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c3c34e68bc4ec66a1dd39fd08d5b45d8837cf322f18595a802a7212a4912b538",
+ "name": "tests/unit/plugins/modules/test_na_ontap_dns.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7610957ab058ef4dfc0a8a8b7ddcff6e98a280d27ba8b3b6175764590c454cc7",
+ "name": "tests/unit/plugins/modules/test_na_ontap_flexcache.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "75378af4c8dc017b3ed6dfaa773e7fdc90728fa5df7a5caaedc0a02994eb39cf",
+ "name": "tests/unit/plugins/modules/test_na_ontap_ports.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4f0963093fe313dd73cd9a4da2216def87c37bd2e7725a097fc0e4a1a03e44c9",
+ "name": "tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "54951e5fadf1680437906fc7f56d9f60bd3f27c532d26d644c7c0d7688b9cb7f",
+ "name": "tests/unit/plugins/modules/test_na_ontap_login_messages.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "023e19e21bf1b2da3efbd5d0eed558bf999de0d75cefa2da37e038564bc427b8",
+ "name": "tests/unit/plugins/modules/test_na_ontap_quota_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/sanity",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8f972d81f03bf078a65a205b015a7aaf0ab8defd0bdcba445f4e1b56a944acbd",
+ "name": "tests/sanity/ignore-2.10.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3350343c36b48033fcf34615d9079d5787e33d7d4772d97b0acaf77cec5c0dcd",
+ "name": "tests/sanity/ignore-2.9.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "18f0da43241ea031ad542199a494c9bd41497e433a76b10877a0e99943aacff2",
+ "name": "roles/na_ontap_san_create/vars/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "407ab2176c0ab9b7a76cd211ef521707d7e235c54541675dded3072cb7986396",
+ "name": "roles/na_ontap_san_create/tasks/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "name": "roles/na_ontap_san_create/LICENSE",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create/tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c39e1a82c97fa762f3a69e75a0ce6a94fbb9495644ee13bb5317946cfdd38ff9",
+ "name": "roles/na_ontap_san_create/tests/test.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "name": "roles/na_ontap_san_create/tests/inventory",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create/meta",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d246e4414698cea9a28832538e8d758995a82139ddabd071ac774b5d5535c5d5",
+ "name": "roles/na_ontap_san_create/meta/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "000ed1e09f4456f226738ce9bde1aeeaf4d6ca3218e84dbfdecbc8945211d1e2",
+ "name": "roles/na_ontap_san_create/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create/defaults",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "09a1aed78d518f0c1be75e821a8e3ebfccea529830af65c9b8277ec93c6f28e5",
+ "name": "roles/na_ontap_san_create/defaults/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_san_create/handlers",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b20386ed515535fd893ba13348c997b5d8528811ca426842a83ed43a584ee3af",
+ "name": "roles/na_ontap_san_create/handlers/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4e962daf92e5cb4b3e2602824df206c8e4eb8e32ffa16cf10a5507728e7d1e84",
+ "name": "roles/na_ontap_cluster_config/vars/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ba13d898817d36c2c517b1b50b9ec955f21501ad68fbd18791c83a83703d7e09",
+ "name": "roles/na_ontap_cluster_config/tasks/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8486a10c4393cee1c25392769ddd3b2d6c242d6ec7928e1414efff7dfb2f07ef",
+ "name": "roles/na_ontap_cluster_config/LICENSE",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config/tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b8f9e458cc16f29d894aa55a0f3ab18f8e0b227658af5ac78efe4e5ccfd508a5",
+ "name": "roles/na_ontap_cluster_config/tests/test.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "name": "roles/na_ontap_cluster_config/tests/inventory",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config/meta",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "38945ac5667c2c76acd3fda9fb32e4964cb8c9d33e66186083c4db75ea3608fc",
+ "name": "roles/na_ontap_cluster_config/meta/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "551eb6b9bbcb3dc35586575721150e53f68c7a5160e34a64e45c54ec4d3f5ad8",
+ "name": "roles/na_ontap_cluster_config/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config/defaults",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "94221b229375a7d7b223cf93c1c1310a42e7279673cf03ac0ed97ea0642c54ae",
+ "name": "roles/na_ontap_cluster_config/defaults/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_cluster_config/handlers",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "332a0407082dd7ad159029403c0db7c1586b3d8e8f0019bec1330181c0caea4f",
+ "name": "roles/na_ontap_cluster_config/handlers/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "18f0da43241ea031ad542199a494c9bd41497e433a76b10877a0e99943aacff2",
+ "name": "roles/na_ontap_nas_create/vars/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "18f3a09c48bc14d3ad330abece58b9c1eb7fa9e2374ac187b4133f95f6869cd1",
+ "name": "roles/na_ontap_nas_create/tasks/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "name": "roles/na_ontap_nas_create/LICENSE",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create/tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "78ad0b52f5ee91ba11d6fbc0a78bb875b9163f2174e35eafe94d33eac1402c52",
+ "name": "roles/na_ontap_nas_create/tests/test.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "name": "roles/na_ontap_nas_create/tests/inventory",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create/meta",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5b4c026ebfffba631ae8cb19a8de3f037e67a849f2baff3a9cf9d612b2bdb0ec",
+ "name": "roles/na_ontap_nas_create/meta/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d9746759cc2df9ca92cacc1ec402d8ddb764ee22139c88094710aa10d82cdbad",
+ "name": "roles/na_ontap_nas_create/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create/defaults",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e32c08abf9649939f0e9d7b385ef233a5e8f6a181a96edf93d2b27e5a9e7a77c",
+ "name": "roles/na_ontap_nas_create/defaults/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_nas_create/handlers",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3e679547628d573a12dbbb9aad72a51a76cb13f9df0ed4e202031d841c6f8be1",
+ "name": "roles/na_ontap_nas_create/handlers/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6b0116be533ec31ef0563af71e3ce6273887f84a1cda3afbab39cb51a8d0c99d",
+ "name": "roles/na_ontap_snapmirror_create/vars/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9b31411560870b8c350e98f52d1b55f00162fb6f3d63281b49e2709037e4cd04",
+ "name": "roles/na_ontap_snapmirror_create/tasks/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create/tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8703d74e07c6c870fae5bb548f0f94bdb13ffd5fbd2d21dc9cd7f749b97fe900",
+ "name": "roles/na_ontap_snapmirror_create/tests/test.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "name": "roles/na_ontap_snapmirror_create/tests/inventory",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create/meta",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3257c32777454f16ceb5e2b8a4ec523658754aa2e343c6a19df00d74bed5824d",
+ "name": "roles/na_ontap_snapmirror_create/meta/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "87c82405910802bdcfdba3020fced7bd62589ad055995bb81a42c4721c1016f6",
+ "name": "roles/na_ontap_snapmirror_create/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create/defaults",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "84ffc0ebe72a90d4092d78be05d47f2b657d0eb0a8d9f8b16c81c8a84e512212",
+ "name": "roles/na_ontap_snapmirror_create/defaults/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_snapmirror_create/handlers",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "063e11fe54a19476190c1ea7fe25714bcad5168ac5e28405643ee96178d2542a",
+ "name": "roles/na_ontap_snapmirror_create/handlers/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d5f92c61cfe06f0abe1fa48f281abf64bcb67e8bfa27cb761ed61686e1201eda",
+ "name": "roles/na_ontap_vserver_create/vars/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3e877d064a36f98bcf155e0dcd12a2851cbfb6fea9c37487a8a8f324ee1e97a4",
+ "name": "roles/na_ontap_vserver_create/tasks/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "name": "roles/na_ontap_vserver_create/LICENSE",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create/tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6ef36d674b56682bfa15768e8380ff34e254cc053e0da2e65ef2cc00be8aa23d",
+ "name": "roles/na_ontap_vserver_create/tests/test.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "name": "roles/na_ontap_vserver_create/tests/inventory",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create/meta",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "74395723cb202397f3155578320e213c51fd5ec4762691951e354fa931ab7880",
+ "name": "roles/na_ontap_vserver_create/meta/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "74941df2e36fefaec0d17590ea31dfeb46dce4aaec40ea3cf0e92519f7f36641",
+ "name": "roles/na_ontap_vserver_create/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create/defaults",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3d4552ca0c6f9c060c6680dee89169df7afa2a6063576cc04c90dd40ade033f2",
+ "name": "roles/na_ontap_vserver_create/defaults/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles/na_ontap_vserver_create/handlers",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7271febdc6d62d5a8d19f0335fa2d57db369853e9462a250cc2b65eb234c52e4",
+ "name": "roles/na_ontap_vserver_create/handlers/main.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/examples",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e2539f89113f53927288c0f1514665d88179f2521684bf1f0191b4ca4aabf488",
+ "name": "playbooks/examples/na_ontap_pb_upgrade_firmware.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "956ff27f3a59afa9935155484fb0282625f7c49d8573c4458325754a284b73e5",
+ "name": "playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b7903d8edd6099a589d1e99bb7739113f53f07ccb81e0e5bccafe1e6f1d8499d",
+ "name": "playbooks/examples/na_ontap_pb_install_SSL_certificate.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "656438606c35b13cac05ad25ddd69415305123f896cd88a382d8411469e889cf",
+ "name": "playbooks/examples/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/examples/json_query",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e5285071aec96c88c8ebe63c0e6fab224918e8b91637efa60bd9705e6bd777fa",
+ "name": "playbooks/examples/json_query/README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "36de4d3cce05ee744e0465b8f123913b285286ef56b6010f7e2bb96a5a3b1957",
+ "name": "playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d9360b9c1b85cfeac456be961e57934ca9fdbff39a4d96815578715a1328cd5c",
+ "name": "playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8f54ac0344e9bd529f398d75d111b7b9b8312fc753514362a51d4ab7812ad4bb",
+ "name": "playbooks/examples/ontap_vars_file.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "746f5cf868287b79e0f1cbb0a9ab19dff21a3e2153177483818f37a32c5fd69e",
+ "name": "playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs/fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c0be94b8c59f1eb415a4e4e6c987772c0395a67064f8660aad11e97d665c2813",
+ "name": "changelogs/fragments/DEVOPS-3329.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c6d7b713bdd8043c514e11416eee71a4372be1979b72396ec630771aa0df9d82",
+ "name": "changelogs/fragments/DEVOPS-3368.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d33814b718f1d3a86b69c4723a1f4e68ea28b184ef561643c80212bf62ba9ab3",
+ "name": "changelogs/fragments/20.3.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "069204db1e6da702e07111fac767a5044210fbbede5b9ecb30da7031356b6704",
+ "name": "changelogs/fragments/DEVOPS-3149.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "26a62f18ddac258e5f97032e42c4d801115c0b03c76654918267c2a4d18777d1",
+ "name": "changelogs/fragments/DEVOPS-2426.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "52a66cdff25de9b14c4be8365e3130f5ada9ab7b9f20c40b8635c1f552291b1c",
+ "name": "changelogs/fragments/20.1.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7f1762d7b528ab3861fcc4c056ee0fb841600bf50711fd9ba760726b8612a310",
+ "name": "changelogs/fragments/DEVOPS-3369.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e6dbb60010475fc16a75a30f87938cf1e8683e272d1ae4249789d036cfa28de0",
+ "name": "changelogs/fragments/DEVOPS-3386.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "822fed342ba51d797c0d796b1c9f4c4cf376c3ced2834ea101a8177bd42485ea",
+ "name": "changelogs/fragments/DEVOPS-3390.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "885ee5ca962cdf4dd7c888a936f057c0a401414c65571a4030628fb4cdf6b32d",
+ "name": "changelogs/fragments/DEVOPS-3113.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d2abacb569d9adbd7b7587b7190f5e03fd88bbdc1c246d8bd73d13ad82b4413f",
+ "name": "changelogs/fragments/DEVOPS-3304.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "629aa011160d1d211417076d19b82f46c782b4c977d82405b65a4afb6a9f8c0c",
+ "name": "changelogs/fragments/DEVOPS-3312.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ae8d980c571479c22756401c2dae46fa7526ac8e1bd071ff6a3c8b1d160e207d",
+ "name": "changelogs/fragments/DEVOPS-3358.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e2f10c30cb7f7e72a5cc918a8c70a6d4aa46b6427131569c565af246ec5075e8",
+ "name": "changelogs/fragments/github-56.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9a45f0c9e764458cff6d07469a5bff424024e43f74a4d7960235ef6a8d06d972",
+ "name": "changelogs/fragments/20.5.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6e0c8170836c1972e5928a16e65535e63fd479bc4e197b77d7a3ee5616d6e2c8",
+ "name": "changelogs/fragments/DEVOPS-2965.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "68a3a7e3ae54392aecfe21feac211fc5539cd10d3526a24eec36c05d740bb393",
+ "name": "changelogs/fragments/20.9.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ee64020d610ab58bb7b01814c1f251898393c1ac439fd0e7eda3086175b912f7",
+ "name": "changelogs/fragments/DEVOPS-3354.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8d4c5c41a09e649bd883b7bf7e9282aae03c8c694b6ba826dba750bff9fe35a5",
+ "name": "changelogs/fragments/DEVOPS-3401.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b8c3b9dde1166cd5e04faaec18bfddf6b79d0c2e9e660ac7fee4c05c8e6720cd",
+ "name": "changelogs/fragments/DEVOPS-3400.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c7efe12ee6eebd037d85cd27de039581aa661646b6da344393ef77dec0fa2207",
+ "name": "changelogs/fragments/DEVOPS-3139.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6017cf3e9df1bb5e888813d13dfd69e8cf62a8bffa0dc326b7ed1cdc10d4136b",
+ "name": "changelogs/fragments/19.10.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5c1b5f6c2d7f52b2d7835a95a3e928fa7fe0a8188bec3604ddb6635ec36cec24",
+ "name": "changelogs/fragments/DEVOPS-3181.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8eeab9b764a6d8b4d70b21ed516793faeb8e0dae62a44b51046aa6affc9f0838",
+ "name": "changelogs/fragments/DEVOPS-3251.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8ae05f4b30ee077e947c9c17f1110fc38a1fe347e2519493ccb39a46ad0fe227",
+ "name": "changelogs/fragments/DEVOPS-3178.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "91e53b1c94a0925984b409d46ac2c50704d7eee14de2ea67d5be1360de7965de",
+ "name": "changelogs/fragments/DEVOPS-2964.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6184c68f2cdddda83afdd95f7b9523aa9679a701ff8a2189e7c61cb1edd1025d",
+ "name": "changelogs/fragments/20.7.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "29a4caa1c9decc1eaf4442b59a5cba4f6bf5e41e3b0107a70db053a00e2a3162",
+ "name": "changelogs/fragments/DEVOPS-3399.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fefcc121464cde10b40963deaf29159fcdefe108ebaf8b72adb4d1525290a0dc",
+ "name": "changelogs/fragments/20.2.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0730c6978d44995f2675e0a2b6a3c8d73f0d7e3c3fb1ef758a08607ebd6c4256",
+ "name": "changelogs/fragments/DEVOPS-3442.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e537dc1c387c40fbe90e4c014cd0e53e0359bdf179154d8ddfc3f17fcda1c5d3",
+ "name": "changelogs/fragments/DEVOPS-3454.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4f421581196d13e4737cf90233dde8c1791c90ce706ec84ca926e889bbac582c",
+ "name": "changelogs/fragments/DEVOPS-3194.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b9a4d56ad3d75b37f72420eb90d516e2d8cfed5796b5cf96967e3e011db0023a",
+ "name": "changelogs/fragments/DEVOPS-3443.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d081c6cd36ad9bc682598ab7970998af3a27506625315da10e8074c1f18749dc",
+ "name": "changelogs/fragments/DEVOPS-3385.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fb81b50d40454aa4816b265b805103e1e965449e33bf47c8893bd8795c78e4ae",
+ "name": "changelogs/fragments/DEVOPS-2668.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1543aa1015730c0d62906cd43b15d75f2272892b17960c648a0a0e72dd50b36c",
+ "name": "changelogs/fragments/DEVOPS-3346.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0e898205ff917c45fe4214fc93efc6d1d9dde2220fcbe84df45db6b8c6c607f3",
+ "name": "changelogs/fragments/20.4.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c883fec44f63c4e61696177246a87a7889570febf9938c56fb7aeef0b24037f5",
+ "name": "changelogs/fragments/DEVOPS-3366.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d30f60360c127c3223186e9b93acb5293d846698e0a8759907fd6b99f8c77081",
+ "name": "changelogs/fragments/DEVOPS-3310.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0d56008d00b6fc4f20d6ab7742bfc94da88009a199fcd895769aea7e63277867",
+ "name": "changelogs/fragments/20.8.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "40e1953925ea029d66655fc474d5f1fcddac23bbd38db80800f36526f8ea3951",
+ "name": "changelogs/fragments/DEVOPS-3262.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3dfa7436cee382333f2f49b62a4933c1293a7ea8aab027f923ca252bd5839b21",
+ "name": "changelogs/fragments/20.6.1.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a6903dfcba6ebcdde9dbeda9634d3992ecb9bc9681558b3b88f97ad4312366a1",
+ "name": "changelogs/fragments/DEVOPS-3167.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b3876566d792ae492f13ddc9e2b51be33ee82d319554c5fd8a9a627fabe14f6c",
+ "name": "changelogs/fragments/20.6.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b475d2aa51c1e5305b48770104d355fb59b13ec39758e4432156d7c44fab9dcc",
+ "name": "changelogs/fragments/DEVOPS-3371.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f8d7742342151743a7b4a27bac65d6aa361d041a7fd2f68b99b980df5e61f268",
+ "name": "changelogs/fragments/DEVOPS-3367.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4242572b057b756ccc9f2aba9906d429af16f874474edc95a7165062934d0c2d",
+ "name": "changelogs/fragments/20.4.1.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "588905e810b54e945acb17d2fe859cb954a71f5573bb58c2794416949695b88a",
+ "name": "changelogs/fragments/19.11.0.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "581117e6e90f32ce72d72e5e5b34c1bdc481976cbd64c50ce4a2669bfcdb633b",
+ "name": "changelogs/fragments/DEVOPS-3392.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7bdfc1bd5a49a86880d38b82b1c6a050df3d9659c3b74236293f81f536fc5ad6",
+ "name": "changelogs/config.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ee6bf1b250629185188c2a9d92c906195671ceeb9ed781647745a9d4736a4e4d",
+ "name": "changelogs/.plugin-cache.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7c0e3566d533366e9c3409a35375d8047688025745267f4dfe6f1d47d2004e91",
+ "name": "changelogs/changelog.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "406773fb4d4f1cc5e668fbfb225db55e3a6e9301c3ec22f6a896c35b3564ee28",
+ "name": "README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5f338eab118183cede7a5a93cb8bd4ab1d76d10b3630044ddcb47674b7a95a28",
+ "name": "CHANGELOG.rst",
+ "chksum_type": "sha256",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/MANIFEST.json b/collections-debian-merged/ansible_collections/netapp/ontap/MANIFEST.json
new file mode 100644
index 00000000..8b2efa00
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/MANIFEST.json
@@ -0,0 +1,32 @@
+{
+ "collection_info": {
+ "description": "NetApp ONTAP Collection",
+ "repository": "https://github.com/ansible-collections/netapp",
+ "tags": [
+ "storage"
+ ],
+ "dependencies": {},
+ "authors": [
+ "NetApp Ansible Team <ng-ansibleteam@netapp.com>"
+ ],
+ "issues": null,
+ "name": "ontap",
+ "license": [
+ "GPL-2.0-or-later"
+ ],
+ "documentation": null,
+ "namespace": "netapp",
+ "version": "20.12.0",
+ "readme": "README.md",
+ "license_file": null,
+ "homepage": "https://netapp.io/configuration-management-and-automation/"
+ },
+ "file_manifest_file": {
+ "format": 1,
+ "ftype": "file",
+ "chksum_sha256": "816c63eb002b92b5c4881bfbfb50b7dd48c1106e6cc69c381bac654fe6aca5d7",
+ "name": "FILES.json",
+ "chksum_type": "sha256"
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/README.md
new file mode 100644
index 00000000..c196ace0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/README.md
@@ -0,0 +1,532 @@
+=============================================================
+
+ netapp.ontap
+
+ NetApp ONTAP Collection
+
+ Copyright (c) 2019 NetApp, Inc. All rights reserved.
+ Specifications subject to change without notice.
+
+=============================================================
+# Installation
+```bash
+ansible-galaxy collection install netapp.ontap
+```
+To use this collection, add the following to the top of your playbook; without it, you will be using the Ansible 2.9 version of the modules.
+```yaml
+collections:
+ - netapp.ontap
+```
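+A minimal playbook sketch using this collection is shown below (the hostname and credential variables are placeholders you would define in your inventory or vars):
+```yaml
+- hosts: localhost
+  gather_facts: false
+  collections:
+    - netapp.ontap
+  tasks:
+    # Gather ONTAP information; connection details are placeholder variables.
+    - name: Gather ONTAP info
+      na_ontap_info:
+        hostname: "{{ ontap_hostname }}"
+        username: "{{ ontap_username }}"
+        password: "{{ ontap_password }}"
+        https: true
+        validate_certs: false
+      register: ontap_facts
+```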
+# Need help
+Join our Slack Channel at [Netapp.io](http://netapp.io/slack)
+
+# Release Notes
+
+## 20.12.0
+
+### New Options
+ - na_ontap_igroup - new option `os_type` to replace `ostype` (but ostype is still accepted).
+ - na_ontap_info - new fact: cifs_options_info.
+ - na_ontap_info - new fact: cluster_log_forwarding_info.
+ - na_ontap_info - new fact: event_notification_destination_info.
+ - na_ontap_info - new fact: event_notification_info.
+ - na_ontap_info - new fact: security_login_role_config_info.
+ - na_ontap_info - new fact: security_login_role_info.
+ - na_ontap_lun - new option `from_name` to rename a LUN.
+ - na_ontap_lun - new option `os_type` to replace `ostype` (but ostype is still accepted), and removed the default of `image`.
+ - na_ontap_lun - new option `qos_policy_group` to assign a qos_policy_group to a LUN.
+ - na_ontap_lun - new option `san_application_template` to create LUNs without explicitly creating a volume and using REST APIs.
+ - na_ontap_qos_policy_group - new option `is_shared` to control whether QOS SLOs are shared.
+ - na_ontap_quota_policy - new option `auto_assign` to assign quota policy to vserver.
+ - na_ontap_quotas - new option `activate_quota_on_change` to resize or reinitialize quotas.
+ - na_ontap_quotas - new option `perform_user_mapping` to perform user mapping for the user specified in quota-target.
+ - na_ontap_rest_info - Support for gather subsets: `cifs_home_directory_info, cluster_software_download, event_notification_info, event_notification_destination_info, security_login_info, security_login_rest_role_info`
+ - na_ontap_svm - warning for `aggr_list` wildcard value (`*`) in create/modify idempotency.
+ - na_ontap_volume - `compression` to enable compression on a FAS volume.
+ - na_ontap_volume - `inline-compression` to enable inline compression on a volume.
+ - na_ontap_volume - `nas_application_template` to create a volume using nas application REST API.
+ - na_ontap_volume - `size_change_threshold` to ignore small changes in volume size.
+ - na_ontap_volume - `sizing_method` to resize a FlexGroup using REST.
+
+### Bug fixes
+ - na_ontap_broadcast_domain_ports - properly report check_mode `changed`.
+ - na_ontap_cifs - fix for AttributeError - 'NoneType' object has no attribute 'get' on line 300
+ - na_ontap_user - application parameter expects only ``service_processor`` but module supports ``service-processor``.
+ - na_ontap_volume - a change in volume type was ignored; it is now reported as an error.
+ - na_ontap_volume - checking for success before failure led to 'NoneType' object has no attribute 'get_child_by_name' when modifying a Flexcache volume.
+
+## 20.11.0
+
+### New Modules
+ - na_ontap_metrocluster_dr_group: Configure a Metrocluster DR group (Supports ONTAP 9.8+)
+
+### Minor changes
+ - na_ontap_cifs - output `modified` if a modify action is taken.
+ - na_ontap_cluster_peer: optional parameter 'ipspace' added for cluster peer.
+ - na_ontap_info - do not require write access privileges. This also enables other modules to work in check_mode without write access permissions.
+ - na_ontap_lun - support modify for space_allocation and space_reserve.
+ - na_ontap_mcc_mediator - improve error reporting when REST is not available.
+ - na_ontap_metrocluster - improve error reporting when REST is not available.
+ - na_ontap_wwpn_alias - improve error reporting when REST is not available.
+ - na_ontap_software_update - add `force_update` option to ignore current version.
+ - na_ontap_svm - output `modified` if a modify action is taken.
+ - all ZAPI modules - optimize Basic Authentication by adding Authorization header proactively. This can be disabled by setting the `classic_basic_authorization` feature_flag to True.
+
+### Bug fixes
+ - All REST modules - will not fail if a job fails.
+ - na_ontap_cifs - fix idempotency issue when `show-previous-versions` is used.
+ - na_ontap_firmware_upgrade - fix ValueError issue when processing URL error.
+ - na_ontap_info - Use `node-id` as key rather than `current-version`.
+ - na_ontap_ipspace - invalid call in error reporting (double error).
+ - na_ontap_lun - `use_exact_size` to create a lun with the exact given size so that the lun is not rounded up.
+ - na_ontap_metrocluster: Fix issue where module would fail while waiting for the REST API job.
+ - na_ontap_software_update - module is not idempotent.
+
+## 20.10.0
+
+### New Options
+- na_ontap_rest_info: Support for gather subsets - `application_info, application_template_info, autosupport_config_info , autosupport_messages_history, ontap_system_version, storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, support_ems_filters`
+
+### Bug fixes
+- na_ontap_aggregate: support concurrent actions for rename/modify/add_object_store and create/add_object_store.
+- na_ontap_cluster: `single_node_cluster` option was ignored.
+- na_ontap_info: better reporting on KeyError traceback, option to ignore error.
+- na_ontap_info: KeyError on `tree` for quota_report_info.
+- na_ontap_snapmirror_policy: report error when attempting to change `policy_type` rather than taking no action.
+- na_ontap_volume: `encrypt: false` is ignored when creating a volume.
+
+## 20.9.0
+
+### New Modules
+- na_ontap_active_directory: configure active directory.
+- na_ontap_mcc_mediator: Configure a MCC Mediator (Supports ONTAP 9.8+).
+- na_ontap_metrocluster: Configure a metrocluster (Supports ONTAP 9.8+).
+
+### New Options
+- na_ontap_cluster: `node_name` to set the node name when adding a node, or as an alternative to `cluster_ip_address` to remove a node.
+- na_ontap_cluster: `state` can be set to `absent` to remove a node identified with `cluster_ip_address` or `node_name`.
+- na_ontap_qtree: `wait_for_completion` and `time_out` to wait for qtree deletion when using REST.
+- na_ontap_quotas: `soft_disk_limit` and `soft_file_limit` for the quota target.
+- na_ontap_rest_info: Support for gather subsets - `initiator_groups_info, san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, storage_luns_info, storage_NVMe_namespaces.`
+
+### Bug fixes
+- na_ontap_cluster: `check_mode` is now working properly.
+- na_ontap_interface: `home_node` is not required in pre-cluster mode.
+- na_ontap_interface: `role` is not required if `service_policy` is present and ONTAP version is 9.8.
+- na_ontap_interface: traceback in get_interface if node is not reachable.
+- na_ontap_job_schedule: allow 'job_minutes' to be set to -1 for job creation with REST too.
+- na_ontap_qtree: fixed `None is not subscriptable` exception on rename operation.
+- na_ontap_volume: fixed `KeyError` exception on `size` when reporting creation error.
+- na_ontap_*: change version_added: '2.6' to version_added: 2.6.0 where applicable to satisfy sanity checker.
+- netapp.py: uncaught exception (traceback) on zapi.NaApiError.
+
+## 20.8.0
+
+### New Modules
+- na_ontap_file_directory_policy: create, modify, delete vserver security file directory policy/task.
+- na_ontap_ssh_command: send CLI command over SSH using paramiko for corner cases where ZAPI or REST are not yet ready.
+- na_ontap_wait_for_condition: wait for event to be present or absent (currently sp_upgrade/in_progress and sp_version).
+
+### New Options
+- na_ontap_aggregate: support `disk_size_with_unit` option.
+- na_ontap_ldap_client: support `ad_domain` and `preferred_ad_server` options.
+- na_ontap_rest_info: Support for gather subsets - `cloud_targets_info, cluster_chassis_info, cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, svm_nis_config_info, svm_peers_info, svm_peer-permissions_info`.
+- na_ontap_rest_info: Support for gather subsets for 9.8+ - `cluster_metrocluster_diagnostics`.
+- na_ontap_qtree: `force_delete` option with a DEFAULT of `true` so that ZAPI behavior is aligned with REST.
+- na_ontap_security_certificates: `ignore_name_if_not_supported` option to not fail if `name` is present since `name` is not supported in ONTAP 9.6 and 9.7.
+- na_ontap_software_update: added `timeout` option to give enough time for the update to complete.
+
+### Bug fixes
+- na_ontap_aggregate: `disk-info` error when using `disks` option.
+- na_ontap_autosupport_invoke: `message` has changed to `autosupport_message` as Redhat has reserved this word. `message` has been alias'd to `autosupport_message`.
+- na_ontap_cifs_vserver: fix documentation and add more examples.
+- na_ontap_cluster: module was not idempotent when changing location or contact information.
+- na_ontap_igroup: idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+- na_ontap_igroup_initiator: idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+- na_ontap_security_certificates: allows (`common_name`, `type`) as an alternate key since `name` is not supported in ONTAP 9.6 and 9.7.
+- na_ontap_info: Fixed error causing module to fail on `metrocluster_check_info`, `env_sensors_info` and `volume_move_target_aggr_info`.
+- na_ontap_snapmirror: fixed KeyError when accessing `relationship_type` parameter.
+- na_ontap_snapmirror_policy: fixed a race condition when creating a new policy.
+- na_ontap_snapmirror_policy: fixed idempotency issue with `is_network_compression_enabled` for REST.
+- na_ontap_software_update: ignore connection errors during update as nodes may not be reachable.
+- na_ontap_user: enable lock state and password to be set in the same task for existing user.
+- na_ontap_volume: issue when snapdir_access and atime_update not passed together.
+- na_ontap_vscan_on_access_policy: `bool` type was not properly set for `scan_files_with_no_ext`.
+- na_ontap_vscan_on_access_policy: `policy_status` enable/disable option was not supported.
+- na_ontap_vscan_on_demand_task: `file_ext_to_include` was not handled properly.
+- na_ontap_vscan_scanner_pool_policy: scanner_pool apply policy support on modification.
+- na_ontap_vserver_create(role): lif creation now defaults to system-defined unless the lif type is iscsi.
+- use_rest: values are now case insensitive.
+
+### Module documentation changes
+- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+- add `type:` and `elements:` information where missing.
+- update `required:` information.
+
+## 20.7.0
+
+### New Modules
+- na_ontap_security_certificates: Install, create, sign, delete security certificates.
+
+### New Options:
+- na_ontap_info: support `continue_on_error` option to continue when a ZAPI is not supported on a vserver, or for cluster RPC errors.
+- na_ontap_info: support `query` option to specify which objects to return.
+- na_ontap_info: support `vserver` tunneling to limit output to one vserver.
+- na_ontap_snapmirror_policy: support for SnapMirror policy rules.
+- na_ontap_vscan_scanner_pool: support modification.
+- na_ontap_rest_info: Support for gather subsets - `cluster_node_info, cluster_peer_info, disk_info, cifs_services_info, cifs_share_info`.
+- module_utils/netapp: add retry on wait_on_job when a job fails; abort after 3 consecutive errors.
+
+### Bug fixes:
+- na_ontap_command: replace invalid backspace characters (0x08) with '.'.
+- na_ontap_firmware_download: exception on PCDATA if ONTAP returns a BEL (0x07) character.
+- na_ontap_info: lists were incorrectly processed in convert_keys, returning {}.
+- na_ontap_info: qtree_info is missing most entries. Changed key from `vserver:id` to `vserver:volume:id`.
+- na_ontap_iscsi_security: adding no_log for password parameters.
+- na_ontap_portset: adding explicit error message as modify portset is not supported.
+- na_ontap_snapmirror: fixed snapmirror delete for loadsharing to not go to quiesce state for the rest of the set.
+- na_ontap_ucadapter: fixed KeyError if type is not provided and mode is 'cna'.
+- na_ontap_user: checked `applications` does not contain snmp when using REST API call.
+- na_ontap_user: fixed KeyError if locked key not set with REST API call.
+- na_ontap_user: fixed KeyError if vserver: is empty with REST API call (useful to indicate cluster scope).
+- na_ontap_volume: fixed KeyError when getting info on an MVD volume.
+
+### Example playbook
+- na_ontap_pb_get_online_volumes.yml: list of volumes that are online (or offline).
+- na_ontap_pb_install_SSL_certificate_REST.yml: installing SSL certificate using REST APIs.
+
+## 20.6.1
+
+### New Options:
+- na_ontap_firmware_upgrade: `reboot_sp`: reboot service processor before downloading package.
+- na_ontap_firmware_upgrade: `rename_package`: rename file when downloading service processor package.
+- na_ontap_firmware_upgrade: `replace_package`: replace local file when downloading service processor package.
+
+### Bug Fixes
+- na_ontap_firmware_upgrade: images are not downloaded, but the module reports success.
+- na_ontap_user: fixed KeyError if password is not provided.
+- na_ontap_password: do not error out if password is identical to previous password (idempotency).
+
+## 20.6.0
+
+### Support for SSL certificate authentication in addition to password
+The ONTAP Ansible modules currently require a username/password combination to authenticate with ONTAPI or REST APIs.
+It is now possible to use SSL certificate authentication with ONTAPI or REST.
+You will first need to install an SSL certificate in ONTAP, see for instance the first part of:
+https://netapp.io/2016/11/08/certificate-based-authentication-netapp-manageability-sdk-ontap/
+The applications that need to be authorized for `cert` are `ontapi` and `http`.
+
+The new `cert_filepath`, `key_filepath` options enable SSL certificate authentication.
+This is mutually exclusive with using `username` and `password`.
+
+ONTAP does not support `cert` authentication for console, so this is not supported for `na_ontap_command`.
+
+SSL certificate authentication requires Python 2.7 or 3.x.
+
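+As a sketch, a task using certificate authentication could look like the following (the file paths and hostname variable are placeholders; install the certificate in ONTAP first as described above):
+```yaml
+# Certificate authentication sketch: cert_filepath/key_filepath replace username/password.
+- name: Gather ONTAP info using SSL certificate authentication
+  na_ontap_info:
+    hostname: "{{ ontap_hostname }}"
+    cert_filepath: /etc/ssl/ontap/ontap_client.pem   # placeholder path
+    key_filepath: /etc/ssl/ontap/ontap_client.key    # placeholder path
+    https: true
+    validate_certs: false
+```
+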
+### New Options
+- na_ontap_disks: `disk_type` option allows assigning a specified type of disk.
+- na_ontap_firmware_upgrade: ignore timeout when downloading image unless `fail_on_502_error` is set to true.
+- na_ontap_info: `desired_attributes` advanced feature to select which fields to return.
+- na_ontap_info: `use_native_zapi_tags` to disable the conversion of '_' to '-' for attribute keys.
+- na_ontap_rest_info: `fields` options to request specific fields from subset.
+- na_ontap_software_update: `stabilize_minutes` option specifies number of minutes needed to stabilize node before update.
+- na_ontap_snapmirror: now performs restore with optional field `source_snapshot` for specific snapshot or uses latest.
+- na_ontap_ucadapter: `pair_adapters` option allows specifying the list of adapters which also need to be offline.
+- na_ontap_user: `authentication_password` option specifies password for the authentication protocol of SNMPv3 user.
+- na_ontap_user: `authentication_protocol` option specifies authentication protocol for SNMPv3 user.
+- na_ontap_user: `engine_id` option specifies authoritative entity's EngineID for the SNMPv3 user.
+- na_ontap_user: `privacy_password` option specifies password for the privacy protocol of SNMPv3 user.
+- na_ontap_user: `privacy_protocol` option specifies privacy protocol of SNMPv3 user.
+- na_ontap_user: `remote_switch_ipaddress` option specifies the IP Address of the remote switch of SNMPv3 user.
+- na_ontap_volume: `check_interval` option checks if a volume move has been completed and then waits this number of seconds before checking again.
+- na_ontap_volume: `auto_remap_luns` option controls automatic mapping of LUNs during volume rehost.
+- na_ontap_volume: `force_restore` option forces volume to restore even if the volume has one or more newer Snapshot copies.
+- na_ontap_volume: `force_unmap_luns` option controls automatic unmapping of LUNs during volume rehost.
+- na_ontap_volume: `from_vserver` option allows volume rehost from one vserver to another.
+- na_ontap_volume: `preserve_lun_ids` option controls whether LUNs in the volume being restored remain mapped and their identities preserved.
+- na_ontap_volume: `snapshot_restore` option specifies name of snapshot to restore from.
+- all modules: `cert_filepath`, `key_filepath` to enable SSL certificate authentication (python 2.7 or 3.x).
+
+### Bug Fixes
+- na_ontap_firmware_upgrade: ignore timeout when downloading firmware images by default.
+- na_ontap_info: conversion from '-' to '_' was not done for lists of dictionaries.
+- na_ontap_ntfs_dacl: example fix in documentation string.
+- na_ontap_snapmirror: could not delete all rules (bug in netapp_module).
+- na_ontap_volume: modify was invoked multiple times when once is enough.
+- na_ontap_volume: fix KeyError on 'style' when volume is of type: data-protection.
+- na_ontap_volume: `wait_on_completion` is supported with volume moves.
+- module_utils/netapp_module: cater for empty lists in get_modified_attributes().
+- module_utils/netapp_module: cater for lists with duplicate elements in compare_lists().
+
+### Example playbook
+- na_ontap_pb_install_SSL_certificate.yml: installing a self-signed SSL certificate, and enabling SSL certificate authentication.
+
+### Added REST support to existing modules
+- na_ontap_user: added REST support for ONTAP user creation, modification & deletion.
+
+
+## 20.5.0
+
+### New Options:
+- na_ontap_aggregate: `raid_type` options supports 'raid_0' for ONTAP Select.
+- na_ontap_cluster_peer: `encryption_protocol_proposed` option allows specifying encryption protocol to be used for inter-cluster communication.
+- na_ontap_info: new fact: aggr_efficiency_info.
+- na_ontap_info: new fact: cluster_switch_info.
+- na_ontap_info: new fact: disk_info.
+- na_ontap_info: new fact: env_sensors_info.
+- na_ontap_info: new fact: net_dev_discovery_info.
+- na_ontap_info: new fact: service_processor_info.
+- na_ontap_info: new fact: shelf_info.
+- na_ontap_info: new fact: sis_info.
+- na_ontap_info: new fact: subsys_health_info.
+- na_ontap_info: new fact: sysconfig_info.
+- na_ontap_info: new fact: sys_cluster_alerts.
+- na_ontap_info: new fact: volume_move_target_aggr_info.
+- na_ontap_info: new fact: volume_space_info.
+- na_ontap_nvme_namespace: `block_size` option allows specifying size in bytes of a logical block.
+- na_ontap_snapmirror: snapmirror now allows resume feature.
+- na_ontap_volume: `cutover_action` option allows specifying the action to be taken for cutover.
+
+### Bug Fixes
+- REST API call now honors the `http_port` parameter.
+- REST API detection now works with vserver (use_rest: Auto).
+- na_ontap_autosupport_invoke: when using ZAPI and name is not given, send autosupport message to all nodes in the cluster.
+- na_ontap_cg_snapshot: properly states it does not support check_mode.
+- na_ontap_cluster: ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster.
+- na_ontap_cluster_ha: support check_mode.
+- na_ontap_cluster_peer: support check_mode.
+- na_ontap_cluster_peer: EMS log wrongly uses destination credentials with source hostname.
+- na_ontap_disks: support check_mode.
+- na_ontap_dns: support check_mode.
+- na_ontap_efficiency_policy: change `duration` type from int to str to support '-' input.
+- na_ontap_fcp: support check_mode.
+- na_ontap_flexcache: support check_mode.
+- na_ontap_info: `metrocluster_check_info` does not trigger a traceback but adds an "error" info element if the target system is not set up for metrocluster.
+- na_ontap_license: support check_mode.
+- na_ontap_login_messages: fix documentation link.
+- na_ontap_node: support check mode.
+- na_ontap_ntfs_sd: documentation string update for examples and made sure owner or group not mandatory.
+- na_ontap_ports: now support check mode.
+- na_ontap_restit: error can be a string in addition to a dict. This fix removes a traceback with AttributeError.
+- na_ontap_routes: support Check Mode correctly.
+- na_ontap_snapmirror: support check_mode.
+- na_ontap_software_update: Incorrectly stated that it supports check mode; it does not.
+- na_ontap_svm_options: support check_mode.
+- na_ontap_volume: improve error reporting if required parameter is present but not set.
+- na_ontap_volume: suppress traceback in wait_for_completion as volume may not be completely ready.
+- na_ontap_volume: fix KeyError on 'style' when volume is offline.
+- na_ontap_volume_autosize: Support check_mode when `reset` option is given.
+- na_ontap_volume_snaplock: fix documentation link.
+- na_ontap_vserver_peer: support check_mode.
+- na_ontap_vserver_peer: EMS log wrongly uses destination credentials with source hostname.
+
+### New Modules
+- na_ontap_rest_info: Gather ONTAP subset information using REST APIs (9.6 and Above).
+
+### Role Change
+- na_ontap_cluster_config: Port flowcontrol and autonegotiate can be set in the role.
+
+## 20.4.1
+
+### New Options
+- na_ontap_firmware_upgrade: `force_disruptive_update` and `package_url` options allow making choices for downloading and upgrading packages.
+
+### Added REST support to existing modules
+- na_ontap_autosupport_invoke: added REST support for sending autosupport message.
+
+### Bug Fixes
+- na_ontap_volume: `volume_security_style` option now allows modify.
+- na_ontap_info: `metrocluster_check_info` has been removed as it was breaking the info module for everyone who didn't have a metrocluster set up. We are working on adding this back in a future update.
+
+### Role Changes
+- na_ontap_vserver_create has a new default variable `netapp_version` set to 140. If you are running 9.2 or below, please add the variable to your playbook and set it to 120.
+
+## 20.4.0
+
+### New Options
+- na_ontap_aggregate: `disk_count` option allows adding additional disk to aggregate.
+- na_ontap_info: `max_records` option specifies maximum number of records returned in a single ZAPI call.
+- na_ontap_info: `summary` option specifies a boolean flag to control returning all or none of the info attributes.
+- na_ontap_info: new fact: iscsi_service_info.
+- na_ontap_info: new fact: license_info.
+- na_ontap_info: new fact: metrocluster_info.
+- na_ontap_info: new fact: metrocluster_check_info.
+- na_ontap_info: new fact: metrocluster_node_info.
+- na_ontap_info: new fact: net_interface_service_policy_info.
+- na_ontap_info: new fact: ontap_system_version.
+- na_ontap_info: new fact: ontapi_version (and deprecate ontap_version, both fields are reported for now).
+- na_ontap_info: new fact: qtree_info.
+- na_ontap_info: new fact: quota_report_info.
+- na_ontap_info: new fact: snapmirror_destination_info.
+- na_ontap_interface: `service_policy` option to identify a single service or a list of services that will use a LIF.
+- na_ontap_kerberos_realm: `ad_server_ip` option specifies IP Address of the Active Directory Domain Controller (DC).
+- na_ontap_kerberos_realm: `ad_server_name` option specifies Host name of the Active Directory Domain Controller (DC).
+- na_ontap_snapmirror_policy: REST is included and all defaults are removed from options.
+- na_ontap_snapmirror: `relationship-info-only` option allows managing relationship information.
+- na_ontap_software_update: `download_only` option allows downloading the cluster image without performing a software update.
+- na_ontap_volume: `snapshot_auto_delete` option allows managing auto delete settings of a specified volume.
+
+### Bug Fixes
+- na_ontap_cifs_server: delete AD account if username and password are provided when state=absent
+- na_ontap_info: return all records of each gathered subset.
+- na_ontap_info: cifs_server_info: fix KeyError exception on `domain` if only `domain-workgroup` is present.
+- na_ontap_iscsi_security: Fixed modify functionality for CHAP and corrected a typo.
+- na_ontap_kerberos_realm: fix `kdc_vendor` case sensitivity issue.
+- na_ontap_snapmirror: calling quiesce before snapmirror break.
+
+### New Modules
+- na_ontap_autosupport_invoke: send autosupport message.
+- na_ontap_ntfs_dacl: create/modify/delete ntfs dacl (discretionary access control list).
+- na_ontap_ntfs_sd: create/modify/delete ntfs security descriptor.
+- na_ontap_restit: send any REST API request to ONTAP (9.6 and above).
+- na_ontap_snmp_traphosts: Create and delete snmp traphosts (9.7 and Above)
+- na_ontap_wwpn_alias: create/modify/delete vserver fcp wwpn-alias.
+- na_ontap_zapit: send any ZAPI request to ONTAP.
+
+## 20.3.0
+
+### New Options
+- na_ontap_info: New info added: `cluster_identity_info`
+- na_ontap_info: New info added: `storage_bridge_info`
+- na_ontap_snapmirror: performs resync when the `relationship_state` is active and the current state is broken-off.
+
+### Bug Fixes
+- na_ontap_vscan_scanner_pool: has been updated to match the standard format used for all other ONTAP modules.
+- na_ontap_volume_snaplock: Fixed KeyError exception on 'is-volume-append-mode-enabled'
+
+### New Modules
+- na_ontap_snapmirror_policy: create/modify/delete snapmirror policy.
+
+## 20.2.0
+
+### New Modules
+- na_ontap_volume_snaplock: modify volume snaplock retention.
+
+### New Options
+- na_ontap_info: New info added: `snapshot_info`
+- na_ontap_info: `max_records` option to set maximum number of records to return per subset.
+- na_ontap_snapmirror: `relationship_state` option for breaking the snapmirror relationship.
+- na_ontap_snapmirror: `update_snapmirror` option for updating the snapmirror relationship.
+- na_ontap_volume_clone: `split` option to split clone volume from parent volume.
+
+### Bug Fixes
+- na_ontap_cifs_server: Fixed KeyError exception on 'cifs_server_name'
+- na_ontap_command: fixed traceback when using return_dict if u'1' is present in result value.
+- na_ontap_login_messages: Fixed example documentation and a spelling mistake.
+- na_ontap_nvme_subsystem: fixed bug when creating subsystem, vserver was not filtered.
+- na_ontap_svm: if snapshot policy is changed, modify fails with "Extra input: snapshot_policy"
+- na_ontap_svm: if language: C.UTF-8 is specified, the module is not idempotent
+- na_ontap_volume_clone: fixed 'Extra input: parent-vserver' error when running as cluster admin.
+- na_ontap_qtree: Fixed issue with Get function for REST
+
+### Role Changes
+- na_ontap_nas_create role: fix typo in README file, add CIFS example.
+
+## 20.1.0
+
+### New Modules
+- na_ontap_login_messages: create/modify/delete security login messages including banner and mtod.
+
+### New Options
+- na_ontap_aggregate: add `snaplock_type`.
+- na_ontap_info: New info added: `cifs_server_info`, `cifs_share_info`, `cifs_vserver_security_info`, `cluster_peer_info`, `clock_info`, `export_policy_info`, `export_rule_info`, `fcp_adapter_info`, `fcp_alias_info`, `fcp_service_info`, `job_schedule_cron_info`, `kerberos_realm_info`, `ldap_client`, `ldap_config`, `net_failover_group_info`, `net_firewall_info`, `net_ipspaces_info`, `net_port_broadcast_domain_info`, `net_routes_info`, `net_vlan_info`, `nfs_info`, `ntfs_dacl_info`, `ntfs_sd_info`, `ntp_server_info`, `role_info`, `service_processor_network_info`, `sis_policy_info`, `snapmirror_policy_info`, `snapshot_policy_info`, `vscan_info`, `vserver_peer_info`
+- na_ontap_igroup_initiator: `force_remove` to forcibly remove initiators from an igroup that is currently mapped to a LUN.
+- na_ontap_interface: `failover_group` to specify the failover group for the LIF. `is_ipv4_link_local` to specify the LIF's are to acquire a ipv4 link local address.
+- na_ontap_rest_cli: add OPTIONS as a supported verb and return list of allowed verbs.
+- na_ontap_volume: add `group_id` and `user_id`.
+
+### Bug Fixes
+- na_ontap_aggregate: Fixed traceback when running as vsadmin; now cleanly errors out.
+- na_ontap_command: stdout_lines_filter contains data only if include/exclude_lines parameter is used. (zeten30)
+- na_ontap_command: stripped_line len is checked only once, filters are inside if block. (zeten30)
+- na_ontap_interface: allow module to run on node before joining the cluster.
+- na_ontap_net_ifgrp: Fixed error for na_ontap_net_ifgrp if no port is given.
+- na_ontap_snapmirror: Fixed traceback when running as vsadmin. Do not attempt to break a relationship that is 'Uninitialized'.
+- na_ontap_snapshot_policy: Fixed KeyError: 'prefix' bug when prefix parameter isn't supplied.
+- na_ontap_volume: Fixed error reporting if efficiency policy cannot be read. Do not attempt to read efficiency policy if not needed.
+- na_ontap_volume: Fixed error when modifying volume efficiency policy.
+- na_ontap_volume_clone: Fixed KeyError exception on 'volume'
+
+### Added REST support to existing modules
+- na_ontap_dns: added REST support for dns creation and modification on cluster vserver.
+
+### Role Changes
+
+## 19.11.0
+
+### New Modules
+- na_ontap_quota_policy: create/rename/delete quota policy.
+
+### New Options
+- na_ontap_cluster: added single node cluster option; also now supports modifying cluster contact and location options.
+- na_ontap_info: Now allows you to use vsadmin to get info (must use the `vserver` option).
+- na_ontap_info: Added `vscan_status_info`, `vscan_scanner_pool_info`, `vscan_connection_status_all_info`, `vscan_connection_extended_stats_info`
+- na_ontap_efficiency_policy: `changelog_threshold_percent` to set the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+
+### Bug Fixes
+- na_ontap_cluster: autosupport log is pushed after cluster create is performed; removed license add or remove option.
+- na_ontap_dns: report error if modify or delete operations are attempted on cserver when using REST. Make create operation idempotent for cserver when using REST. Support for modify/delete on cserver when using REST will be added later.
+- na_ontap_firewall_policy: portmap added as a valid service
+- na_ontap_net_routes: REST does not support the 'metric' attribute
+- na_ontap_snapmirror: added initialize boolean option which specifies whether to initialize SnapMirror relation.
+- na_ontap_volume: fixed error when deleting flexGroup volume with ONTAP 9.7.
+- na_ontap_volume: tiering option requires 9.4 or later (error on volume-comp-aggr-attributes)
+- na_ontap_vscan_scanner_pool: fix module only gets one scanner pool.
+
+### Added REST support to existing modules
+
+### Role Changes
+
+## 19.10.0
+Changes in 19.10.0 and September collection releases compared to Ansible 2.9
+
+### New Modules
+- na_ontap_name_service_switch: create/modify/delete name service switch configuration.
+- na_ontap_iscsi_security: create/modify/delete iscsi security.
+
+### New Options
+- na_ontap_command: `vserver`: to allow command to run as either cluster admin or vserver admin. To run as vserver admin you must use the vserver option.
+- na_ontap_motd: rename `message` to `motd_message` to avoid conflict with Ansible internal variable name.
+- na_ontap_nvme_namespace: `size_unit` to specify size in different units.
+- na_ontap_snapshot_policy: `prefix`: option to use for creating snapshot policy.
+
+### Bug Fixes
+- na_ontap_ndmp: minor documentation changes for restore_vm_cache_size and data_port_range.
+- na_ontap_qtree: REST API takes "unix_permissions" as parameter instead of "mode".
+- na_ontap_qtree: unix permission is not available when security style is ntfs
+- na_ontap_user: minor documentation update for application parameter.
+- na_ontap_volume: `efficiency_policy` was ignored
+- na_ontap_volume: enforce that space_slo and space_guarantee are mutually exclusive
+- na_ontap_svm: "allowed_protocols" added to param in proper way in case of using REST API
+- na_ontap_firewall_policy: documentation changed for supported service parameter.
+- na_ontap_net_subnet: fix ip_ranges option failing on existing subnet.
+- na_ontap_snapshot_policy: fix vsadmin approach for managing snapshot policy.
+- na_ontap_nvme_subsystem: fix fetching unique nvme subsystem based on vserver filter.
+- na_ontap_net_routes: change metric type from string to int.
+- na_ontap_cifs_server: minor documentation changes correction of create example with "name" parameter and adding type to parameters.
+- na_ontap_vserver_cifs_security: fix int and boolean options when modifying vserver cifs security.
+- na_ontap_net_subnet: fix rename idempotency issue and updated rename check.
+
+### Added REST support to existing modules
+By default, the module will use REST if the target system supports it, and the options are supported. Otherwise, it will switch back to ZAPI. This behavior can be controlled with the `use_rest` option:
+1. Always: to force REST. The module fails and reports an error if REST cannot be used.
+1. Never: to force ZAPI. This could be useful if you find some incompatibility with REST, or want to confirm the behavior is identical between REST and ZAPI.
+1. Auto: the default, as described above.
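+
+For example, a task forcing REST might look like this sketch (the policy, vserver, and connection values are placeholders):
+```yaml
+- name: Create an export policy using REST only
+  na_ontap_export_policy:
+    state: present
+    name: ansible_export_policy        # placeholder policy name
+    vserver: svm1                      # placeholder vserver name
+    use_rest: Always
+    hostname: "{{ ontap_hostname }}"
+    username: "{{ ontap_username }}"
+    password: "{{ ontap_password }}"
+    https: true
+    validate_certs: false
+```
+
+REST support has been added to the following modules: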
+
+- na_ontap_ipspace
+- na_ontap_export_policy
+- na_ontap_ndmp
+  - Note: only `enable` and `authtype` are supported with REST
+- na_ontap_net_routes
+- na_ontap_qtree
+  - Note: `oplocks` is not supported with REST, defaults to enable.
+- na_ontap_svm
+  - Note: `root_volume`, `root_volume_aggregate`, `root_volume_security_style` are not supported with REST.
+- na_ontap_job_schedule
+
+### Role Changes
+- na_ontap_cluster_config updated to allow a cleaner playbook
+- na_ontap_vserver_create updated to allow a cleaner playbook
+- na_ontap_nas_create updated to allow a cleaner playbook
+- na_ontap_san_create updated to allow a cleaner playbook
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml
new file mode 100644
index 00000000..b11c6da4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml
@@ -0,0 +1,520 @@
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ na_ontap_active_directory:
+ description: NetApp ONTAP configure active directory
+ name: na_ontap_active_directory
+ namespace: ''
+ version_added: 20.9.0
+ na_ontap_aggregate:
+ description: NetApp ONTAP manage aggregates.
+ name: na_ontap_aggregate
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_autosupport:
+ description: NetApp ONTAP Autosupport
+ name: na_ontap_autosupport
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_autosupport_invoke:
+ description: NetApp ONTAP send AutoSupport message
+ name: na_ontap_autosupport_invoke
+ namespace: ''
+ version_added: 20.4.0
+ na_ontap_broadcast_domain:
+ description: NetApp ONTAP manage broadcast domains.
+ name: na_ontap_broadcast_domain
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_broadcast_domain_ports:
+ description: NetApp ONTAP manage broadcast domain ports
+ name: na_ontap_broadcast_domain_ports
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_cg_snapshot:
+ description: NetApp ONTAP manage consistency group snapshot
+ name: na_ontap_cg_snapshot
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_cifs:
+ description: NetApp ONTAP Manage cifs-share
+ name: na_ontap_cifs
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_cifs_acl:
+ description: NetApp ONTAP manage cifs-share-access-control
+ name: na_ontap_cifs_acl
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_cifs_server:
+ description: NetApp ONTAP CIFS server configuration
+ name: na_ontap_cifs_server
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_cluster:
+ description: NetApp ONTAP cluster - create a cluster and add/remove nodes.
+ name: na_ontap_cluster
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_cluster_ha:
+ description: NetApp ONTAP Manage HA status for cluster
+ name: na_ontap_cluster_ha
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_cluster_peer:
+ description: NetApp ONTAP Manage Cluster peering
+ name: na_ontap_cluster_peer
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_command:
+ description: NetApp ONTAP Run any cli command, the username provided needs to
+ have console login permission.
+ name: na_ontap_command
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_disks:
+ description: NetApp ONTAP Assign disks to nodes
+ name: na_ontap_disks
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_dns:
+ description: NetApp ONTAP Create, delete, modify DNS servers.
+ name: na_ontap_dns
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_efficiency_policy:
+ description: NetApp ONTAP manage efficiency policies (sis policies)
+ name: na_ontap_efficiency_policy
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_export_policy:
+ description: NetApp ONTAP manage export-policy
+ name: na_ontap_export_policy
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_export_policy_rule:
+ description: NetApp ONTAP manage export policy rules
+ name: na_ontap_export_policy_rule
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_fcp:
+ description: NetApp ONTAP Start, Stop and Enable FCP services.
+ name: na_ontap_fcp
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_file_directory_policy:
+ description: NetApp ONTAP create, delete, or modify vserver security file-directory
+ policy
+ name: na_ontap_file_directory_policy
+ namespace: ''
+ version_added: 20.8.0
+ na_ontap_firewall_policy:
+ description: NetApp ONTAP Manage a firewall policy
+ name: na_ontap_firewall_policy
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_firmware_upgrade:
+ description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
+ name: na_ontap_firmware_upgrade
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_flexcache:
+ description: NetApp ONTAP FlexCache - create/delete relationship
+ name: na_ontap_flexcache
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_igroup:
+ description: NetApp ONTAP iSCSI or FC igroup configuration
+ name: na_ontap_igroup
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_igroup_initiator:
+ description: NetApp ONTAP igroup initiator configuration
+ name: na_ontap_igroup_initiator
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_info:
+ description: NetApp information gatherer
+ name: na_ontap_info
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_interface:
+ description: NetApp ONTAP LIF configuration
+ name: na_ontap_interface
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_ipspace:
+ description: NetApp ONTAP Manage an ipspace
+ name: na_ontap_ipspace
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_iscsi:
+ description: NetApp ONTAP manage iSCSI service
+ name: na_ontap_iscsi
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_iscsi_security:
+ description: NetApp ONTAP Manage iscsi security.
+ name: na_ontap_iscsi_security
+ namespace: ''
+ version_added: 19.10.1
+ na_ontap_job_schedule:
+ description: NetApp ONTAP Job Schedule
+ name: na_ontap_job_schedule
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_kerberos_realm:
+ description: NetApp ONTAP vserver nfs kerberos realm
+ name: na_ontap_kerberos_realm
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_ldap:
+ description: NetApp ONTAP LDAP
+ name: na_ontap_ldap
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_ldap_client:
+ description: NetApp ONTAP LDAP client
+ name: na_ontap_ldap_client
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_license:
+ description: NetApp ONTAP protocol and feature licenses
+ name: na_ontap_license
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_login_messages:
+ description: Setup login banner and message of the day
+ name: na_ontap_login_messages
+ namespace: ''
+ version_added: 20.1.0
+ na_ontap_lun:
+ description: NetApp ONTAP manage LUNs
+ name: na_ontap_lun
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_lun_copy:
+ description: NetApp ONTAP copy LUNs
+ name: na_ontap_lun_copy
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_lun_map:
+ description: NetApp ONTAP LUN maps
+ name: na_ontap_lun_map
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_mcc_mediator:
+ description: NetApp ONTAP Add and Remove MetroCluster Mediator
+ name: na_ontap_mcc_mediator
+ namespace: ''
+ version_added: 20.9.0
+ na_ontap_metrocluster:
+ description: NetApp ONTAP set up a MetroCluster
+ name: na_ontap_metrocluster
+ namespace: ''
+ version_added: 20.9.0
+ na_ontap_motd:
+ description: Setup motd
+ name: na_ontap_motd
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_name_service_switch:
+ description: NetApp ONTAP Manage name service switch
+ name: na_ontap_name_service_switch
+ namespace: ''
+ version_added: null
+ na_ontap_ndmp:
+ description: NetApp ONTAP NDMP services configuration
+ name: na_ontap_ndmp
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_net_ifgrp:
+ description: NetApp Ontap modify network interface group
+ name: na_ontap_net_ifgrp
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_net_port:
+ description: NetApp ONTAP network ports.
+ name: na_ontap_net_port
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_net_routes:
+ description: NetApp ONTAP network routes
+ name: na_ontap_net_routes
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_net_subnet:
+ description: NetApp ONTAP Create, delete, modify network subnets.
+ name: na_ontap_net_subnet
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_net_vlan:
+ description: NetApp ONTAP network VLAN
+ name: na_ontap_net_vlan
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_nfs:
+ description: NetApp ONTAP NFS status
+ name: na_ontap_nfs
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_node:
+ description: NetApp ONTAP Rename a node.
+ name: na_ontap_node
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_ntfs_dacl:
+      description: NetApp Ontap create, delete or modify NTFS DACL (discretionary
+ access control list)
+ name: na_ontap_ntfs_dacl
+ namespace: ''
+ version_added: 20.4.0
+ na_ontap_ntfs_sd:
+ description: NetApp ONTAP create, delete or modify NTFS security descriptor
+ name: na_ontap_ntfs_sd
+ namespace: ''
+ version_added: 20.4.0
+ na_ontap_ntp:
+ description: NetApp ONTAP NTP server
+ name: na_ontap_ntp
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_nvme:
+ description: NetApp ONTAP Manage NVMe Service
+ name: na_ontap_nvme
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_nvme_namespace:
+ description: NetApp ONTAP Manage NVME Namespace
+ name: na_ontap_nvme_namespace
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_nvme_subsystem:
+ description: NetApp ONTAP Manage NVME Subsystem
+ name: na_ontap_nvme_subsystem
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_object_store:
+ description: NetApp ONTAP manage object store config.
+ name: na_ontap_object_store
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_ports:
+ description: NetApp ONTAP add/remove ports
+ name: na_ontap_ports
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_portset:
+ description: NetApp ONTAP Create/Delete portset
+ name: na_ontap_portset
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_qos_adaptive_policy_group:
+ description: NetApp ONTAP Adaptive Quality of Service policy group.
+ name: na_ontap_qos_adaptive_policy_group
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_qos_policy_group:
+ description: NetApp ONTAP manage policy group in Quality of Service.
+ name: na_ontap_qos_policy_group
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_qtree:
+ description: NetApp ONTAP manage qtrees
+ name: na_ontap_qtree
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_quota_policy:
+ description: NetApp Ontap create, rename or delete quota policy
+ name: na_ontap_quota_policy
+ namespace: ''
+ version_added: 19.11.0
+ na_ontap_quotas:
+ description: NetApp ONTAP Quotas
+ name: na_ontap_quotas
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_rest_cli:
+ description: NetApp ONTAP Run any cli command, the username provided needs to
+ have console login permission.
+ name: na_ontap_rest_cli
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_rest_info:
+ description: NetApp ONTAP information gatherer using REST APIs
+ name: na_ontap_rest_info
+ namespace: ''
+ version_added: 20.5.0
+ na_ontap_restit:
+ description: NetApp ONTAP Run any REST API on ONTAP
+ name: na_ontap_restit
+ namespace: ''
+ version_added: 20.4.0
+ na_ontap_security_certificates:
+ description: NetApp ONTAP manage security certificates.
+ name: na_ontap_security_certificates
+ namespace: ''
+ version_added: 20.7.0
+ na_ontap_security_key_manager:
+ description: NetApp ONTAP security key manager.
+ name: na_ontap_security_key_manager
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_service_processor_network:
+ description: NetApp ONTAP service processor network
+ name: na_ontap_service_processor_network
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_snapmirror:
+ description: NetApp ONTAP or ElementSW Manage SnapMirror
+ name: na_ontap_snapmirror
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_snapmirror_policy:
+ description: NetApp ONTAP create, delete or modify SnapMirror policies
+ name: na_ontap_snapmirror_policy
+ namespace: ''
+ version_added: 20.3.0
+ na_ontap_snapshot:
+ description: NetApp ONTAP manage Snapshots
+ name: na_ontap_snapshot
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_snapshot_policy:
+ description: NetApp ONTAP manage Snapshot Policy
+ name: na_ontap_snapshot_policy
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_snmp:
+ description: NetApp ONTAP SNMP community
+ name: na_ontap_snmp
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_snmp_traphosts:
+ description: NetApp ONTAP SNMP traphosts.
+ name: na_ontap_snmp_traphosts
+ namespace: ''
+ version_added: 20.3.0
+ na_ontap_software_update:
+ description: NetApp ONTAP Update Software
+ name: na_ontap_software_update
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_ssh_command:
+ description: NetApp ONTAP Run any cli command over plain SSH using paramiko.
+ name: na_ontap_ssh_command
+ namespace: ''
+ version_added: 20.8.0
+ na_ontap_svm:
+ description: NetApp ONTAP SVM
+ name: na_ontap_svm
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_svm_options:
+ description: NetApp ONTAP Modify SVM Options
+ name: na_ontap_svm_options
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_ucadapter:
+ description: NetApp ONTAP UC adapter configuration
+ name: na_ontap_ucadapter
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_unix_group:
+ description: NetApp ONTAP UNIX Group
+ name: na_ontap_unix_group
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_unix_user:
+ description: NetApp ONTAP UNIX users
+ name: na_ontap_unix_user
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_user:
+ description: NetApp ONTAP user configuration and management
+ name: na_ontap_user
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_user_role:
+ description: NetApp ONTAP user role configuration and management
+ name: na_ontap_user_role
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_volume:
+ description: NetApp ONTAP manage volumes.
+ name: na_ontap_volume
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_volume_autosize:
+ description: NetApp ONTAP manage volume autosize
+ name: na_ontap_volume_autosize
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_volume_clone:
+ description: NetApp ONTAP manage volume clones.
+ name: na_ontap_volume_clone
+ namespace: ''
+ version_added: 2.6.0
+ na_ontap_volume_snaplock:
+ description: NetApp ONTAP manage volume snaplock retention.
+ name: na_ontap_volume_snaplock
+ namespace: ''
+ version_added: 20.2.0
+ na_ontap_vscan:
+ description: NetApp ONTAP Vscan enable/disable.
+ name: na_ontap_vscan
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_vscan_on_access_policy:
+ description: NetApp ONTAP Vscan on access policy configuration.
+ name: na_ontap_vscan_on_access_policy
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_vscan_on_demand_task:
+ description: NetApp ONTAP Vscan on demand task configuration.
+ name: na_ontap_vscan_on_demand_task
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_vscan_scanner_pool:
+ description: NetApp ONTAP Vscan Scanner Pools Configuration.
+ name: na_ontap_vscan_scanner_pool
+ namespace: ''
+ version_added: 2.8.0
+ na_ontap_vserver_cifs_security:
+ description: NetApp ONTAP vserver CIFS security modification
+ name: na_ontap_vserver_cifs_security
+ namespace: ''
+ version_added: 2.9.0
+ na_ontap_vserver_peer:
+ description: NetApp ONTAP Vserver peering
+ name: na_ontap_vserver_peer
+ namespace: ''
+ version_added: 2.7.0
+ na_ontap_wait_for_condition:
+ description: NetApp ONTAP wait_for_condition. Loop over a get status request
+ until a condition is met.
+ name: na_ontap_wait_for_condition
+ namespace: ''
+ version_added: 20.8.0
+ na_ontap_wwpn_alias:
+ description: NetApp ONTAP set FCP WWPN Alias
+ name: na_ontap_wwpn_alias
+ namespace: ''
+ version_added: 20.4.0
+ na_ontap_zapit:
+ description: NetApp ONTAP Run any ZAPI on ONTAP
+ name: na_ontap_zapit
+ namespace: ''
+ version_added: 20.4.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 20.10.0
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/changelog.yaml
new file mode 100644
index 00000000..f75fc1a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/changelog.yaml
@@ -0,0 +1,879 @@
+ancestor: null
+releases:
+ 19.10.0:
+ changes:
+ bugfixes:
+      - na_ontap_net_routes - change metric type from string to int.
+      - na_ontap_cifs_server - minor documentation changes, correcting the create example
+        with the "name" parameter and adding type to parameters.
+ - na_ontap_firewall_policy - documentation changed for supported service parameter.
+ - na_ontap_ndmp - minor documentation changes for restore_vm_cache_size and
+ data_port_range.
+ - na_ontap_net_subnet - fix ip_ranges option fails on existing subnet.
+ - na_ontap_net_subnet - fix rename idempotency issue and updated rename check.
+      - na_ontap_nvme_subsystem - fix fetching unique nvme subsystem based on vserver
+ filter.
+ - na_ontap_qtree - REST API takes "unix_permissions" as parameter instead of
+ "mode".
+ - na_ontap_qtree - unix permission is not available when security style is ntfs
+ - na_ontap_snapshot_policy - fix vsadmin approach for managing snapshot policy.
+      - na_ontap_svm - ``allowed_protocols`` is now added to the parameters correctly
+        when using the REST API
+ - na_ontap_user - minor documentation update for application parameter.
+ - na_ontap_volume - ``efficiency_policy`` was ignored
+ - na_ontap_volume - enforce that space_slo and space_guarantee are mutually
+ exclusive
+ - na_ontap_vserver_cifs_security - fix int and boolean options when modifying
+ vserver cifs security.
+ minor_changes:
+ - "Added REST support to existing modules.\n By default, the module will use\
+ \ REST if the target system supports it, and the options are supported. Otherwise,\
+ \ it will switch back to ZAPI.\n This behavior can be controlled with the\
+ \ ``use_rest`` option.\n Always - to force REST. The module fails and reports\
+ \ an error if REST cannot be used.\n Never - to force ZAPI. This could be\
+ \ useful if you find some incompatibility with REST, or want to confirm the\
+ \ behavior is identical between REST and ZAPI.\n Auto - the default, as described\
+ \ above.\n"
+ - na_ontap_cluster_config - role updated to support a cleaner playbook
+ - na_ontap_command - ``vserver`` - to allow command to run as either cluster
+ admin or vserver admin. To run as vserver admin you must use the vserver
+ option.
+ - na_ontap_export_policy - REST support
+ - na_ontap_ipspace - REST support
+ - na_ontap_job_schedule - REST support
+ - na_ontap_motd - rename ``message`` to ``motd_message`` to avoid conflict with
+ Ansible internal variable name.
+ - na_ontap_nas_create - role updated to support a cleaner playbook
+ - na_ontap_ndmp - REST support - only ``enable`` and ``authtype`` are supported
+ with REST
+ - na_ontap_net_routes - REST support
+ - na_ontap_nvme_namespace - ``size_unit`` to specify size in different units.
+ - na_ontap_qtree - REST support - ``oplocks`` is not supported with REST, defaults
+ to enable.
+ - na_ontap_san_create - role updated to support a cleaner playbook
+ - na_ontap_snapshot_policy - ``prefix`` - option to use for creating snapshot
+ policy.
+ - na_ontap_svm - REST support - ``root_volume``, ``root_volume_aggregate``,
+ ``root_volume_security_style`` are not supported with REST.
+ - na_ontap_vserver_create - role updated to support a cleaner playbook
+ fragments:
+ - 19.10.0.yaml
+ release_date: '2019-10-31'
+ 19.10.1:
+ modules:
+ - description: NetApp ONTAP Manage iscsi security.
+ name: na_ontap_iscsi_security
+ namespace: ''
+ release_date: '2019-11-01'
+ 19.11.0:
+ changes:
+ bugfixes:
+ - na_ontap_cluster - autosupport log pushed after cluster create is performed,
+ removed license add or remove option.
+ - na_ontap_dns - report error if modify or delete operations are attempted on
+ cserver when using REST. Make create operation idempotent for cserver when
+ using REST. Support for modify/delete on cserver when using REST will be
+ added later.
+ - na_ontap_firewall_policy - portmap added as a valid service
+ - na_ontap_net_routes - REST does not support the ``metric`` attribute
+ - na_ontap_snapmirror - added initialize boolean option which specifies whether
+ to initialize SnapMirror relation.
+ - na_ontap_volume - fixed error when deleting flexGroup volume with ONTAP 9.7.
+ - na_ontap_volume - tiering option requires 9.4 or later (error on volume-comp-aggr-attributes)
+ - na_ontap_vscan_scanner_pool - fix module only gets one scanner pool.
+ minor_changes:
+ - na_ontap_cluster - added single node cluster option, also now supports for
+ modify cluster contact and location option.
+ - na_ontap_efficiency_policy - ``changelog_threshold_percent`` to set the percentage
+ at which the changelog will be processed for a threshold type of policy, tested
+ once each hour.
+ - na_ontap_info - Added ``vscan_status_info``, ``vscan_scanner_pool_info``,
+ ``vscan_connection_status_all_info``, ``vscan_connection_extended_stats_info``
+      - na_ontap_info - Now allows you to use vsadmin to get info (must use the ``vserver``
+        option).
+ fragments:
+ - 19.11.0.yaml
+ modules:
+ - description: NetApp Ontap create, rename or delete quota policy
+ name: na_ontap_quota_policy
+ namespace: ''
+ release_date: '2019-11-14'
+ 2.6.0:
+ modules:
+ - description: NetApp ONTAP manage aggregates.
+ name: na_ontap_aggregate
+ namespace: ''
+ - description: NetApp ONTAP manage broadcast domains.
+ name: na_ontap_broadcast_domain
+ namespace: ''
+ - description: NetApp ONTAP manage broadcast domain ports
+ name: na_ontap_broadcast_domain_ports
+ namespace: ''
+ - description: NetApp ONTAP Manage cifs-share
+ name: na_ontap_cifs
+ namespace: ''
+ - description: NetApp ONTAP manage cifs-share-access-control
+ name: na_ontap_cifs_acl
+ namespace: ''
+ - description: NetApp ONTAP CIFS server configuration
+ name: na_ontap_cifs_server
+ namespace: ''
+ - description: NetApp ONTAP cluster - create a cluster and add/remove nodes.
+ name: na_ontap_cluster
+ namespace: ''
+ - description: NetApp ONTAP Manage HA status for cluster
+ name: na_ontap_cluster_ha
+ namespace: ''
+ - description: NetApp ONTAP manage export-policy
+ name: na_ontap_export_policy
+ namespace: ''
+ - description: NetApp ONTAP manage export policy rules
+ name: na_ontap_export_policy_rule
+ namespace: ''
+ - description: NetApp ONTAP iSCSI or FC igroup configuration
+ name: na_ontap_igroup
+ namespace: ''
+ - description: NetApp ONTAP LIF configuration
+ name: na_ontap_interface
+ namespace: ''
+ - description: NetApp ONTAP manage iSCSI service
+ name: na_ontap_iscsi
+ namespace: ''
+ - description: NetApp ONTAP Job Schedule
+ name: na_ontap_job_schedule
+ namespace: ''
+ - description: NetApp ONTAP protocol and feature licenses
+ name: na_ontap_license
+ namespace: ''
+ - description: NetApp ONTAP manage LUNs
+ name: na_ontap_lun
+ namespace: ''
+ - description: NetApp ONTAP LUN maps
+ name: na_ontap_lun_map
+ namespace: ''
+ - description: NetApp Ontap modify network interface group
+ name: na_ontap_net_ifgrp
+ namespace: ''
+ - description: NetApp ONTAP network ports.
+ name: na_ontap_net_port
+ namespace: ''
+ - description: NetApp ONTAP network routes
+ name: na_ontap_net_routes
+ namespace: ''
+ - description: NetApp ONTAP network VLAN
+ name: na_ontap_net_vlan
+ namespace: ''
+ - description: NetApp ONTAP NFS status
+ name: na_ontap_nfs
+ namespace: ''
+ - description: NetApp ONTAP NTP server
+ name: na_ontap_ntp
+ namespace: ''
+ - description: NetApp ONTAP manage qtrees
+ name: na_ontap_qtree
+ namespace: ''
+ - description: NetApp ONTAP service processor network
+ name: na_ontap_service_processor_network
+ namespace: ''
+ - description: NetApp ONTAP manage Snapshots
+ name: na_ontap_snapshot
+ namespace: ''
+ - description: NetApp ONTAP SNMP community
+ name: na_ontap_snmp
+ namespace: ''
+ - description: NetApp ONTAP SVM
+ name: na_ontap_svm
+ namespace: ''
+ - description: NetApp ONTAP UC adapter configuration
+ name: na_ontap_ucadapter
+ namespace: ''
+ - description: NetApp ONTAP user configuration and management
+ name: na_ontap_user
+ namespace: ''
+ - description: NetApp ONTAP user role configuration and management
+ name: na_ontap_user_role
+ namespace: ''
+ - description: NetApp ONTAP manage volumes.
+ name: na_ontap_volume
+ namespace: ''
+ - description: NetApp ONTAP manage volume clones.
+ name: na_ontap_volume_clone
+ namespace: ''
+ release_date: '2018-05-24'
+ 2.7.0:
+ modules:
+ - description: NetApp ONTAP Autosupport
+ name: na_ontap_autosupport
+ namespace: ''
+ - description: NetApp ONTAP manage consistency group snapshot
+ name: na_ontap_cg_snapshot
+ namespace: ''
+ - description: NetApp ONTAP Manage Cluster peering
+ name: na_ontap_cluster_peer
+ namespace: ''
+ - description: NetApp ONTAP Run any cli command, the username provided needs to
+ have console login permission.
+ name: na_ontap_command
+ namespace: ''
+ - description: NetApp ONTAP Assign disks to nodes
+ name: na_ontap_disks
+ namespace: ''
+ - description: NetApp ONTAP Create, delete, modify DNS servers.
+ name: na_ontap_dns
+ namespace: ''
+ - description: NetApp ONTAP Start, Stop and Enable FCP services.
+ name: na_ontap_fcp
+ namespace: ''
+ - description: NetApp ONTAP Manage a firewall policy
+ name: na_ontap_firewall_policy
+ namespace: ''
+ - description: Setup motd
+ name: na_ontap_motd
+ namespace: ''
+ - description: NetApp ONTAP Rename a node.
+ name: na_ontap_node
+ namespace: ''
+ - description: NetApp ONTAP or ElementSW Manage SnapMirror
+ name: na_ontap_snapmirror
+ namespace: ''
+ - description: NetApp ONTAP Update Software
+ name: na_ontap_software_update
+ namespace: ''
+ - description: NetApp ONTAP Modify SVM Options
+ name: na_ontap_svm_options
+ namespace: ''
+ - description: NetApp ONTAP Vserver peering
+ name: na_ontap_vserver_peer
+ namespace: ''
+ release_date: '2018-09-21'
+ 2.8.0:
+ modules:
+ - description: NetApp ONTAP FlexCache - create/delete relationship
+ name: na_ontap_flexcache
+ namespace: ''
+ - description: NetApp ONTAP igroup initiator configuration
+ name: na_ontap_igroup_initiator
+ namespace: ''
+ - description: NetApp ONTAP copy LUNs
+ name: na_ontap_lun_copy
+ namespace: ''
+ - description: NetApp ONTAP Create, delete, modify network subnets.
+ name: na_ontap_net_subnet
+ namespace: ''
+ - description: NetApp ONTAP Manage NVMe Service
+ name: na_ontap_nvme
+ namespace: ''
+ - description: NetApp ONTAP Manage NVME Namespace
+ name: na_ontap_nvme_namespace
+ namespace: ''
+ - description: NetApp ONTAP Manage NVME Subsystem
+ name: na_ontap_nvme_subsystem
+ namespace: ''
+ - description: NetApp ONTAP Create/Delete portset
+ name: na_ontap_portset
+ namespace: ''
+ - description: NetApp ONTAP manage policy group in Quality of Service.
+ name: na_ontap_qos_policy_group
+ namespace: ''
+ - description: NetApp ONTAP Quotas
+ name: na_ontap_quotas
+ namespace: ''
+ - description: NetApp ONTAP security key manager.
+ name: na_ontap_security_key_manager
+ namespace: ''
+ - description: NetApp ONTAP manage Snapshot Policy
+ name: na_ontap_snapshot_policy
+ namespace: ''
+ - description: NetApp ONTAP UNIX Group
+ name: na_ontap_unix_group
+ namespace: ''
+ - description: NetApp ONTAP UNIX users
+ name: na_ontap_unix_user
+ namespace: ''
+ - description: NetApp ONTAP Vscan on access policy configuration.
+ name: na_ontap_vscan_on_access_policy
+ namespace: ''
+ - description: NetApp ONTAP Vscan on demand task configuration.
+ name: na_ontap_vscan_on_demand_task
+ namespace: ''
+ - description: NetApp ONTAP Vscan Scanner Pools Configuration.
+ name: na_ontap_vscan_scanner_pool
+ namespace: ''
+ release_date: '2019-04-11'
+ 2.9.0:
+ modules:
+ - description: NetApp ONTAP manage efficiency policies (sis policies)
+ name: na_ontap_efficiency_policy
+ namespace: ''
+ - description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
+ name: na_ontap_firmware_upgrade
+ namespace: ''
+ - description: NetApp information gatherer
+ name: na_ontap_info
+ namespace: ''
+ - description: NetApp ONTAP Manage an ipspace
+ name: na_ontap_ipspace
+ namespace: ''
+ - description: NetApp ONTAP vserver nfs kerberos realm
+ name: na_ontap_kerberos_realm
+ namespace: ''
+ - description: NetApp ONTAP LDAP
+ name: na_ontap_ldap
+ namespace: ''
+ - description: NetApp ONTAP LDAP client
+ name: na_ontap_ldap_client
+ namespace: ''
+ - description: NetApp ONTAP NDMP services configuration
+ name: na_ontap_ndmp
+ namespace: ''
+ - description: NetApp ONTAP manage object store config.
+ name: na_ontap_object_store
+ namespace: ''
+ - description: NetApp ONTAP add/remove ports
+ name: na_ontap_ports
+ namespace: ''
+ - description: NetApp ONTAP Adaptive Quality of Service policy group.
+ name: na_ontap_qos_adaptive_policy_group
+ namespace: ''
+ - description: NetApp ONTAP Run any cli command, the username provided needs to
+ have console login permission.
+ name: na_ontap_rest_cli
+ namespace: ''
+ - description: NetApp ONTAP manage volume autosize
+ name: na_ontap_volume_autosize
+ namespace: ''
+ - description: NetApp ONTAP Vscan enable/disable.
+ name: na_ontap_vscan
+ namespace: ''
+ - description: NetApp ONTAP vserver CIFS security modification
+ name: na_ontap_vserver_cifs_security
+ namespace: ''
+ release_date: '2019-09-16'
+ 20.1.0:
+ changes:
+ bugfixes:
+ - na_ontap_aggregate - Fixed traceback when running as vsadmin and cleanly error
+ out.
+      - na_ontap_command - stdout_lines_filter contains data only if include/exclude_lines
+ parameter is used. (zeten30)
+ - na_ontap_command - stripped_line len is checked only once, filters are inside
+ if block. (zeten30)
+ - na_ontap_interface - allow module to run on node before joining the cluster.
+ - na_ontap_net_ifgrp - Fixed error for na_ontap_net_ifgrp if no port is given.
+ - na_ontap_snapmirror - Fixed traceback when running as vsadmin. Do not attempt
+ to break a relationship that is 'Uninitialized'.
+ - na_ontap_snapshot_policy - Fixed KeyError on ``prefix`` issue when prefix
+ parameter isn't supplied.
+ - na_ontap_volume - Fixed error reporting if efficiency policy cannot be read. Do
+ not attempt to read efficiency policy if not needed.
+ - na_ontap_volume - Fixed error when modifying volume efficiency policy.
+ - na_ontap_volume_clone - Fixed KeyError exception on ``volume``
+ minor_changes:
+ - na_ontap_aggregate - add ``snaplock_type``.
+ - na_ontap_dns - added REST support for dns creation and modification on cluster
+ vserver.
+ - na_ontap_igroup_initiator - ``force_remove`` to forcibly remove initiators
+ from an igroup that is currently mapped to a LUN.
+ - na_ontap_info - New info's added ``cifs_server_info``, ``cifs_share_info``,
+ ``cifs_vserver_security_info``, ``cluster_peer_info``, ``clock_info``, ``export_policy_info``,
+ ``export_rule_info``, ``fcp_adapter_info``, ``fcp_alias_info``, ``fcp_service_info``,
+ ``job_schedule_cron_info``, ``kerberos_realm_info``, ``ldap_client``, ``ldap_config``,
+ ``net_failover_group_info``, ``net_firewall_info``, ``net_ipspaces_info``,
+ ``net_port_broadcast_domain_info``, ``net_routes_info``, ``net_vlan_info``,
+ ``nfs_info``, ``ntfs_dacl_info``, ``ntfs_sd_info``, ``ntp_server_info``, ``role_info``,
+ ``service_processor_network_info``, ``sis_policy_info``, ``snapmirror_policy_info``,
+ ``snapshot_policy_info``, ``vscan_info``, ``vserver_peer_info``
+ - na_ontap_interface - ``failover_group`` to specify the failover group for
+        the LIF. ``is_ipv4_link_local`` to specify the LIFs are to acquire an ipv4
+ link local address.
+ - na_ontap_rest_cli - add OPTIONS as a supported verb and return list of allowed
+ verbs.
+ - na_ontap_volume - add ``group_id`` and ``user_id``.
+ fragments:
+ - 20.1.0.yaml
+ modules:
+ - description: Setup login banner and message of the day
+ name: na_ontap_login_messages
+ namespace: ''
+ release_date: '2020-01-08'
+ 20.10.0:
+ changes:
+ bugfixes:
+ - na_ontap_aggregate - support concurrent actions for rename/modify/add_object_store
+ and create/add_object_store.
+ - na_ontap_cluster - ``single_node_cluster`` option was ignored.
+ - na_ontap_info - KeyError on ``tree`` for quota_report_info.
+ - na_ontap_info - better reporting on KeyError traceback, option to ignore error.
+ - na_ontap_snapmirror_policy - report error when attempting to change ``policy_type``
+ rather than taking no action.
+ - na_ontap_volume - ``encrypt`` with a value of ``false`` is ignored when creating
+ a volume.
+ minor_changes:
+ - na_ontap_rest_info - Support for gather subsets - ``application_info, application_template_info,
+        autosupport_config_info, autosupport_messages_history, ontap_system_version,
+ storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info,
+ storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules,
+ storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events,
+ support_ems_filters``
+ fragments:
+ - DEVOPS-2426.yaml
+ - DEVOPS-3113.yaml
+ - DEVOPS-3139.yaml
+ - DEVOPS-3167.yaml
+ - DEVOPS-3178.yaml
+ - DEVOPS-3194.yaml
+ - DEVOPS-3251.yaml
+ release_date: '2020-10-08'
+ 20.2.0:
+ changes:
+ bugfixes:
+ - na_ontap_cifs_server - Fixed KeyError exception on 'cifs_server_name'
+ - na_ontap_command - fixed traceback when using return_dict if u'1' is present
+ in result value.
+ - na_ontap_login_messages - Fixed example documentation and spelling mistake
+ issue
+ - na_ontap_nvme_subsystem - fixed bug when creating subsystem, vserver was not
+ filtered.
+ - na_ontap_qtree - Fixed issue with Get function for REST
+ - na_ontap_svm - if language C.UTF-8 is specified, the module is not idempotent
+ - na_ontap_svm - if snapshot policy is changed, modify fails with "Extra input
+ - snapshot_policy"
+ - na_ontap_volume_clone - fixed 'Extra input - parent-vserver' error when running
+ as cluster admin.
+ minor_changes:
+ - na_ontap_info - New info's added ``snapshot_info``
+ - na_ontap_info - ``max_records`` option to set maximum number of records to
+ return per subset.
+      - na_ontap_nas_create - role - fix typo in README file, add CIFS example.
+ - na_ontap_snapmirror - ``relationship_state`` option for breaking the snapmirror
+ relationship.
+ - na_ontap_snapmirror - ``update_snapmirror`` option for updating the snapmirror
+ relationship.
+ - na_ontap_volume_clone - ``split`` option to split clone volume from parent
+ volume.
+ fragments:
+ - 20.2.0.yaml
+ modules:
+ - description: NetApp ONTAP manage volume snaplock retention.
+ name: na_ontap_volume_snaplock
+ namespace: ''
+ release_date: '2020-02-05'
+ 20.3.0:
+ changes:
+ bugfixes:
+ - na_ontap_volume_snaplock - Fixed KeyError exception on 'is-volume-append-mode-enabled'
+ - na_ontap_vscan_scanner_pool - has been updated to match the standard format
+ used for all other ontap modules
+ minor_changes:
+ - na_ontap_info - New info's added ``storage_bridge_info``
+      - na_ontap_info - New info's added ``cluster_identity_info``
+ - na_ontap_snapmirror - performs resync when the ``relationship_state`` is active
+ and the current state is broken-off.
+ fragments:
+ - 20.3.0.yaml
+ modules:
+ - description: NetApp ONTAP create, delete or modify SnapMirror policies
+ name: na_ontap_snapmirror_policy
+ namespace: ''
+ - description: NetApp ONTAP SNMP traphosts.
+ name: na_ontap_snmp_traphosts
+ namespace: ''
+ release_date: '2020-03-04'
+ 20.4.0:
+ changes:
+ bugfixes:
+ - na_ontap_cifs_server - delete AD account if username and password are provided
+ when state=absent
+ - na_ontap_info - cifs_server_info - fix KeyError exception on ``domain`` if
+ only ``domain-workgroup`` is present.
+ - na_ontap_info - return all records of each gathered subset.
+ - na_ontap_iscsi_security - Fixed modify functionality for CHAP and typo correction
+ - na_ontap_kerberos_realm - fix ``kdc_vendor`` case sensitivity issue.
+ - na_ontap_snapmirror - calling quiesce before snapmirror break.
+ minor_changes:
+ - na_ontap_aggregate - ``disk_count`` option allows adding additional disk to
+ aggregate.
+ - na_ontap_info - ``max_records`` option specifies maximum number of records
+ returned in a single ZAPI call.
+ - na_ontap_info - ``summary`` option specifies a boolean flag to control return
+ all or none of the info attributes.
+ - na_ontap_info - new fact - iscsi_service_info.
+ - na_ontap_info - new fact - license_info.
+ - na_ontap_info - new fact - metrocluster_check_info.
+ - na_ontap_info - new fact - metrocluster_info.
+ - na_ontap_info - new fact - metrocluster_node_info.
+ - na_ontap_info - new fact - net_interface_service_policy_info.
+ - na_ontap_info - new fact - ontap_system_version.
+ - na_ontap_info - new fact - ontapi_version (and deprecate ontap_version, both
+ fields are reported for now).
+ - na_ontap_info - new fact - qtree_info.
+ - na_ontap_info - new fact - quota_report_info.
+ - na_ontap_info - new fact - snapmirror_destination_info.
+ - na_ontap_interface - ``service_policy`` option to identify a single service
+ or a list of services that will use a LIF.
+ - na_ontap_kerberos_realm - ``ad_server_ip`` option specifies IP Address of
+ the Active Directory Domain Controller (DC).
+ - na_ontap_kerberos_realm - ``ad_server_name`` option specifies Host name of
+ the Active Directory Domain Controller (DC).
+ - na_ontap_snapmirror - ``relationship-info-only`` option allows to manage relationship
+ information.
+ - na_ontap_snapmirror_policy - REST is included and all defaults are removed
+ from options.
+ - na_ontap_software_update - ``download_only`` options allows to download cluster
+ image without software update.
+ - na_ontap_volume - ``snapshot_auto_delete`` option allows to manage auto delete
+ settings of a specified volume.
+ fragments:
+ - 20.4.0.yaml
+ modules:
+ - description: NetApp ONTAP send AutoSupport message
+ name: na_ontap_autosupport_invoke
+ namespace: ''
+    - description: NetApp Ontap create, delete or modify NTFS DACL (discretionary
+ access control list)
+ name: na_ontap_ntfs_dacl
+ namespace: ''
+ - description: NetApp ONTAP create, delete or modify NTFS security descriptor
+ name: na_ontap_ntfs_sd
+ namespace: ''
+ - description: NetApp ONTAP Run any REST API on ONTAP
+ name: na_ontap_restit
+ namespace: ''
+ - description: NetApp ONTAP set FCP WWPN Alias
+ name: na_ontap_wwpn_alias
+ namespace: ''
+ - description: NetApp ONTAP Run any ZAPI on ONTAP
+ name: na_ontap_zapit
+ namespace: ''
+ release_date: '2020-04-01'
+ 20.4.1:
+ changes:
+ bugfixes:
+ - na_ontap_info - ``metrocluster_check_info`` has been removed as it was breaking
+ the info module for everyone who didn't have a metrocluster set up. We are
+ working on adding this back in a future update.
+ - na_ontap_volume - ``volume_security_style`` option now allows modify.
+ minor_changes:
+ - na_ontap_autosupport_invoke - added REST support for sending autosupport message.
+ - na_ontap_firmware_upgrade - ``force_disruptive_update`` and ``package_url``
+        options allow making choices for downloading and upgrading packages.
+ - na_ontap_vserver_create has a new default variable ``netapp_version`` set
+        to 140. If you are running 9.2 or below, please add the variable to your playbook
+        and set it to 120.
+ fragments:
+ - 20.4.1.yaml
+ release_date: '2020-04-13'
+ 20.5.0:
+ changes:
+ bugfixes:
+ - REST API call now honors the ``http_port`` parameter.
+ - REST API detection now works with vserver (use_rest - Auto).
+ - na_ontap_autosupport_invoke - when using ZAPI and name is not given, send
+ autosupport message to all nodes in the cluster.
+ - na_ontap_cg_snapshot - properly states it does not support check_mode.
+ - na_ontap_cluster - ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster.
+ - na_ontap_cluster_ha - support check_mode.
+ - na_ontap_cluster_peer - EMS log wrongly uses destination credentials with
+ source hostname.
+ - na_ontap_cluster_peer - support check_mode.
+ - na_ontap_disks - support check_mode.
+ - na_ontap_dns - support check_mode.
+ - na_ontap_efficiency_policy - change ``duration`` type from int to str to support
+ '-' input.
+ - na_ontap_fcp - support check_mode.
+ - na_ontap_flexcache - support check_mode.
+ - na_ontap_info - `metrocluster_check_info` does not trigger a traceback but
+ adds an "error" info element if the target system is not set up for metrocluster.
+ - na_ontap_license - support check_mode.
+ - na_ontap_login_messages - fix documentation link.
+ - na_ontap_node - support check mode.
+ - na_ontap_ntfs_sd - documentation string update for examples and made sure
+ owner or group not mandatory.
+ - na_ontap_ports - now support check mode.
+ - na_ontap_restit - error can be a string in addition to a dict. This fix removes
+ a traceback with AttributeError.
+ - na_ontap_routes - support Check Mode correctly.
+ - na_ontap_snapmirror - support check_mode.
+      - na_ontap_software_update - Incorrectly stated that it supports check mode;
+ it does not.
+ - na_ontap_svm_options - support check_mode.
+ - na_ontap_volume - fix KeyError on 'style' when volume is offline.
+ - na_ontap_volume - improve error reporting if required parameter is present
+ but not set.
+ - na_ontap_volume - suppress traceback in wait_for_completion as volume may
+ not be completely ready.
+ - na_ontap_volume_autosize - Support check_mode when `reset` option is given.
+ - na_ontap_volume_snaplock - fix documentation link.
+ - na_ontap_vserver_peer - EMS log wrongly uses destination credentials with
+ source hostname.
+ - na_ontap_vserver_peer - support check_mode.
+ minor_changes:
+ - na_ontap_aggregate - ``raid_type`` options supports 'raid_0' for ONTAP Select.
+ - na_ontap_cluster_config - role - Port Flowcontrol and autonegotiate can be
+ set in role
+ - na_ontap_cluster_peer - ``encryption_protocol_proposed`` option allows specifying
+ encryption protocol to be used for inter-cluster communication.
+ - na_ontap_info - new fact - aggr_efficiency_info.
+ - na_ontap_info - new fact - cluster_switch_info.
+ - na_ontap_info - new fact - disk_info.
+ - na_ontap_info - new fact - env_sensors_info.
+ - na_ontap_info - new fact - net_dev_discovery_info.
+ - na_ontap_info - new fact - service_processor_info.
+ - na_ontap_info - new fact - shelf_info.
+ - na_ontap_info - new fact - sis_info.
+ - na_ontap_info - new fact - subsys_health_info.
+ - na_ontap_info - new fact - sys_cluster_alerts.
+ - na_ontap_info - new fact - sysconfig_info.
+ - na_ontap_info - new fact - volume_move_target_aggr_info.
+ - na_ontap_info - new fact - volume_space_info.
+ - na_ontap_nvme_namespace - ``block_size`` option allows specifying size in
+ bytes of a logical block.
+ - na_ontap_snapmirror - snapmirror now allows resume feature.
+ - na_ontap_volume - ``cutover_action`` option allows specifying the action to
+ be taken for cutover.
+ fragments:
+ - 20.5.0.yaml
+ modules:
+ - description: NetApp ONTAP information gatherer using REST APIs
+ name: na_ontap_rest_info
+ namespace: ''
+ release_date: '2020-05-07'
+ 20.6.0:
+ changes:
+ bugfixes:
+ - module_utils/netapp_module - cater for empty lists in get_modified_attributes().
+ - module_utils/netapp_module - cater for lists with duplicate elements in compare_lists().
+ - na_ontap_firmware_upgrade - ignore timeout when downloading firmware images
+ by default.
+ - na_ontap_info - conversion from '-' to '_' was not done for lists of dictionaries.
+ - na_ontap_ntfs_dacl - example fix in documentation string.
+ - na_ontap_snapmirror - could not delete all rules (bug in netapp_module).
+ - na_ontap_volume - `wait_on_completion` is supported with volume moves.
+ - na_ontap_volume - fix KeyError on 'style' when volume is of type - data-protection.
+ - na_ontap_volume - modify was invoked multiple times when once is enough.
+ minor_changes:
+ - all modules - SSL certificate authentication in addition to username/password
+ (python 2.7 or 3.x).
+ - all modules - ``cert_filepath``, ``key_filepath`` to enable SSL certificate
+ authentication (python 2.7 or 3.x).
+ - na_ontap_disks - ``disk_type`` option allows to assign specified type of disk.
+ - na_ontap_firmware_upgrade - ignore timeout when downloading image unless ``fail_on_502_error``
+ is set to true.
+ - na_ontap_info - ``desired_attributes`` advanced feature to select which fields
+ to return.
+ - na_ontap_info - ``use_native_zapi_tags`` to disable the conversion of '_'
+ to '-' for attribute keys.
+ - na_ontap_pb_install_SSL_certificate.yml - playbook example - installing a
+ self-signed SSL certificate, and enabling SSL certificate authentication.
+ - na_ontap_rest_info - ``fields`` options to request specific fields from subset.
+ - na_ontap_snapmirror - now performs restore with optional field ``source_snapshot``
+ for specific snapshot or uses latest.
+ - na_ontap_software_update - ``stabilize_minutes`` option specifies number of
+ minutes needed to stabilize node before update.
+ - na_ontap_ucadapter - ``pair_adapters`` option allows specifying the list of
+ adapters which also need to be offline.
+ - na_ontap_user - ``authentication_password`` option specifies password for
+ the authentication protocol of SNMPv3 user.
+ - na_ontap_user - ``authentication_protocol`` option specifies authentication
+        protocol for SNMPv3 user.
+ - na_ontap_user - ``engine_id`` option specifies authoritative entity's EngineID
+ for the SNMPv3 user.
+ - na_ontap_user - ``privacy_password`` option specifies password for the privacy
+ protocol of SNMPv3 user.
+ - na_ontap_user - ``privacy_protocol`` option specifies privacy protocol of
+ SNMPv3 user.
+ - na_ontap_user - ``remote_switch_ipaddress`` option specifies the IP Address
+ of the remote switch of SNMPv3 user.
+ - na_ontap_user - added REST support for ONTAP user creation, modification &
+ deletion.
+ - na_ontap_volume - ``auto_remap_luns`` option controls automatic mapping of
+ LUNs during volume rehost.
+ - na_ontap_volume - ``check_interval`` option checks if a volume move has been
+ completed and then waits this number of seconds before checking again.
+ - na_ontap_volume - ``force_restore`` option forces volume to restore even if
+        the volume has one or more newer Snapshot copies.
+ - na_ontap_volume - ``force_unmap_luns`` option controls automatic unmapping
+ of LUNs during volume rehost.
+ - na_ontap_volume - ``from_vserver`` option allows volume rehost from one vserver
+ to another.
+ - na_ontap_volume - ``preserve_lun_ids`` option controls LUNs in the volume
+ being restored will remain mapped and their identities preserved.
+ - na_ontap_volume - ``snapshot_restore`` option specifies name of snapshot to
+ restore from.
+ fragments:
+ - 20.6.0.yaml
+ release_date: '2020-06-03'
+ 20.6.1:
+ changes:
+ bugfixes:
+ - na_ontap_firmware_upgrade - images are not downloaded, but the module reports
+ success.
+ - na_ontap_password - do not error out if password is identical to previous
+ password (idempotency).
+ - na_ontap_user - fixed KeyError if password is not provided.
+ minor_changes:
+ - na_ontap_firmware_upgrade - ``reboot_sp`` - reboot service processor before
+ downloading package.
+ - na_ontap_firmware_upgrade - ``rename_package`` - rename file when downloading
+ service processor package.
+ - na_ontap_firmware_upgrade - ``replace_package`` - replace local file when
+ downloading service processor package.
+ fragments:
+ - 20.6.1.yaml
+ release_date: '2020-06-08'
+ 20.7.0:
+ changes:
+ bugfixes:
+ - na_ontap_command - replace invalid backspace characters (0x08) with '.'.
+ - na_ontap_firmware_download - exception on PCDATA if ONTAP returns a BEL (0x07)
+ character.
+ - na_ontap_info - lists were incorrectly processed in convert_keys, returning
+ {}.
+ - na_ontap_info - qtree_info is missing most entries. Changed key from `vserver:id`
+        to `vserver:volume:id`.
+ - na_ontap_iscsi_security - adding no_log for password parameters.
+ - na_ontap_portset - adding explicit error message as modify portset is not
+ supported.
+ - na_ontap_snapmirror - fixed snapmirror delete for loadsharing to not go to
+ quiesce state for the rest of the set.
+ - na_ontap_ucadapter - fixed KeyError if type is not provided and mode is 'cna'.
+ - na_ontap_user - checked `applications` does not contain snmp when using REST
+ API call.
+ - na_ontap_user - fixed KeyError if locked key not set with REST API call.
+ - na_ontap_user - fixed KeyError if vserver - is empty with REST API call (useful
+ to indicate cluster scope).
+ - na_ontap_volume - fixed KeyError when getting info on a MVD volume
+ minor_changes:
+      - module_utils/netapp - add retry on wait_on_job when job failed. Abort after 3 consecutive
+ errors.
+ - na_ontap_info - support ``continue_on_error`` option to continue when a ZAPI
+ is not supported on a vserver, or for cluster RPC errors.
+ - na_ontap_info - support ``query`` option to specify which objects to return.
+ - na_ontap_info - support ``vserver`` tunneling to limit output to one vserver.
+ - na_ontap_pb_get_online_volumes.yml - example playbook to list volumes that
+ are online (or offline).
+ - na_ontap_pb_install_SSL_certificate_REST.yml - example playbook to install
+ SSL certificates using REST APIs.
+ - na_ontap_rest_info - Support for gather subsets - ``cluster_node_info, cluster_peer_info,
+ disk_info, cifs_services_info, cifs_share_info``.
+ - na_ontap_snapmirror_policy - support for SnapMirror policy rules.
+ - na_ontap_vscan_scanner_pool - support modification.
+ fragments:
+ - 20.7.0.yaml
+ modules:
+ - description: NetApp ONTAP manage security certificates.
+ name: na_ontap_security_certificates
+ namespace: ''
+ release_date: '2020-06-24'
+ 20.8.0:
+ changes:
+ bugfixes:
+ - na_ontap_aggregate - ``disk-info`` error when using ``disks`` option.
+ - na_ontap_autosupport_invoke - ``message`` has changed to ``autosupport_message``
+        as Red Hat has reserved this word. ``message`` has been aliased to ``autosupport_message``.
+ - na_ontap_cifs_vserver - fix documentation and add more examples.
+ - na_ontap_cluster - module was not idempotent when changing location or contact
+ information.
+ - na_ontap_igroup - idempotency issue when using uppercase hex digits (A, B,
+ C, D, E, F) in WWN (ONTAP uses lowercase).
+ - na_ontap_igroup_initiator - idempotency issue when using uppercase hex digits
+ (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+ - na_ontap_info - Fixed error causing module to fail on ``metrocluster_check_info``,
+ ``env_sensors_info`` and ``volume_move_target_aggr_info``.
+ - na_ontap_security_certificates - allows (``common_name``, ``type``) as an
+ alternate key since ``name`` is not supported in ONTAP 9.6 and 9.7.
+ - na_ontap_snapmirror - fixed KeyError when accessing ``elationship_type`` parameter.
+ - na_ontap_snapmirror_policy - fixed a race condition when creating a new policy.
+      - na_ontap_snapmirror_policy - fixed idempotency issue with is_network_compression_enabled
+ for REST.
+ - na_ontap_software_update - ignore connection errors during update as nodes
+        may not be reachable.
+ - na_ontap_user - enable lock state and password to be set in the same task
+ for existing user.
+ - na_ontap_volume - issue when snapdir_access and atime_update not passed together.
+ - na_ontap_vscan_on_access_policy - ``bool`` type was not properly set for ``scan_files_with_no_ext``.
+ - na_ontap_vscan_on_access_policy - ``policy_status`` enable/disable option
+ was not supported.
+ - na_ontap_vscan_on_demand_task - ``file_ext_to_include`` was not handled properly.
+ - na_ontap_vscan_scanner_pool_policy - scanner_pool apply policy support on
+ modification.
+ - na_ontap_vserver_create(role) - lif creation now defaults to system-defined
+ unless iscsi lif type.
+ - use_rest is now case insensitive.
+ minor_changes:
+ - add ``type:`` and ``elements:`` information where missing.
+ - na_ontap_aggregate - support ``disk_size_with_unit`` option.
+ - na_ontap_ldap_client - support ``ad_domain`` and ``preferred_ad_server`` options.
+ - na_ontap_qtree - ``force_delete`` option with a DEFAULT of ``true`` so that
+ ZAPI behavior is aligned with REST.
+ - na_ontap_rest_info - Support for gather subsets - ``cloud_targets_info, cluster_chassis_info,
+ cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info,
+ cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info,
+ ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info,
+ san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info,
+ svm_nis_config_info, svm_peers_info, svm_peer-permissions_info``.
+ - na_ontap_rest_info - Support for gather subsets for 9.8+ - ``cluster_metrocluster_diagnostics``.
+ - na_ontap_security_certificates - ``ignore_name_if_not_supported`` option to
+ not fail if ``name`` is present since ``name`` is not supported in ONTAP 9.6
+ and 9.7.
+ - na_ontap_software_update - added ``timeout`` option to give enough time for
+ the update to complete.
+ - update ``required:`` information.
+ - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same
+ thing for 2.8 and 2.9.
+ fragments:
+ - 20.8.0.yaml
+ modules:
+ - description: NetApp ONTAP create, delete, or modify vserver security file-directory
+ policy
+ name: na_ontap_file_directory_policy
+ namespace: ''
+ - description: NetApp ONTAP Run any cli command over plain SSH using paramiko.
+ name: na_ontap_ssh_command
+ namespace: ''
+ - description: NetApp ONTAP wait_for_condition. Loop over a get status request
+ until a condition is met.
+ name: na_ontap_wait_for_condition
+ namespace: ''
+ release_date: '2020-08-05'
+ 20.9.0:
+ changes:
+ bugfixes:
+ - na_ontap_* - change version_added from '2.6' to '2.6.0' where applicable to
+ satisfy sanity checker.
+ - na_ontap_cluster - ``check_mode`` is now working properly.
+ - na_ontap_interface - ``home_node`` is not required in pre-cluster mode.
+ - na_ontap_interface - ``role`` is not required if ``service_policy`` is present
+ and ONTAP version is 9.8.
+ - na_ontap_interface - traceback in get_interface if node is not reachable.
+ - na_ontap_job_schedule - allow ``job_minutes`` to set number to -1 for job
+ creation with REST too.
+ - na_ontap_qtree - fixed ``None is not subscriptable`` exception on rename operation.
+ - na_ontap_volume - fixed ``KeyError`` exception on ``size`` when reporting
+ creation error.
+ - netapp.py - uncaught exception (traceback) on zapi.NaApiError.
+ minor_changes:
+ - na_ontap_cluster - ``node_name`` to set the node name when adding a node,
+        or as an alternative to ``cluster_ip_address`` to remove a node.
+ - na_ontap_cluster - ``state`` can be set to ``absent`` to remove a node identified
+ with ``cluster_ip_address`` or ``node_name``.
+ - na_ontap_qtree - ``wait_for_completion`` and ``time_out`` to wait for qtree
+ deletion when using REST.
+ - na_ontap_quotas - ``soft_disk_limit`` and ``soft_file_limit`` for the quota
+ target.
+ - na_ontap_rest_info - Support for gather subsets - ``initiator_groups_info,
+ san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps,
+ storage_luns_info, storage_NVMe_namespaces.``
+ fragments:
+ - 20.9.0.yaml
+ modules:
+ - description: NetApp ONTAP configure active directory
+ name: na_ontap_active_directory
+ namespace: ''
+ - description: NetApp ONTAP Add and Remove MetroCluster Mediator
+ name: na_ontap_mcc_mediator
+ namespace: ''
+ - description: NetApp ONTAP set up a MetroCluster
+ name: na_ontap_metrocluster
+ namespace: ''
+ release_date: '2020-09-02'
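As a usage illustration for the 20.6.0 entries above (``cert_filepath``/``key_filepath`` for SSL certificate authentication) and the ``ontap_system_version`` subset listed under 20.10.0, here is a minimal sketch of a playbook task; the hostname variable, file paths, and names are placeholders and not part of the collection's changelog:

    # Hypothetical task: authenticate with an SSL certificate instead of username/password.
    # cert_filepath/key_filepath were added in 20.6.0; all values below are placeholders.
    - name: Gather ONTAP version using certificate authentication
      netapp.ontap.na_ontap_rest_info:
        gather_subset:
          - ontap_system_version
        hostname: "{{ ontap_hostname }}"
        cert_filepath: /etc/ansible/certs/ontap_client.pem
        key_filepath: /etc/ansible/certs/ontap_client.key
        validate_certs: false   # only for lab/self-signed setups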
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/config.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/config.yaml
new file mode 100644
index 00000000..ee774595
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/config.yaml
@@ -0,0 +1,32 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: NetApp ONTAP Collection
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml
new file mode 100644
index 00000000..533eddf8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml
@@ -0,0 +1,40 @@
+minor_changes:
+ - na_ontap_command - ``vserver`` - to allow command to run as either cluster admin or vserver admin. To run as vserver admin you must use the vserver option.
+ - na_ontap_motd - rename ``message`` to ``motd_message`` to avoid conflict with Ansible internal variable name.
+ - na_ontap_nvme_namespace - ``size_unit`` to specify size in different units.
+ - na_ontap_snapshot_policy - ``prefix`` - option to use for creating snapshot policy.
+ - |
+ Added REST support to existing modules.
+ By default, the module will use REST if the target system supports it, and the options are supported. Otherwise, it will switch back to ZAPI.
+ This behavior can be controlled with the ``use_rest`` option.
+ Always - to force REST. The module fails and reports an error if REST cannot be used.
+ Never - to force ZAPI. This could be useful if you find some incompatibility with REST, or want to confirm the behavior is identical between REST and ZAPI.
+ Auto - the default, as described above.
+ - na_ontap_ipspace - REST support
+ - na_ontap_export_policy - REST support
+ - na_ontap_ndmp - REST support - only ``enable`` and ``authtype`` are supported with REST
+ - na_ontap_net_routes - REST support
+ - na_ontap_qtree - REST support - ``oplocks`` is not supported with REST, defaults to enable.
+ - na_ontap_svm - REST support - ``root_volume``, ``root_volume_aggregate``, ``root_volume_security_style`` are not supported with REST.
+ - na_ontap_job_schedule - REST support
+ - na_ontap_cluster_config - role updated to support a cleaner playbook
+ - na_ontap_vserver_create - role updated to support a cleaner playbook
+ - na_ontap_nas_create - role updated to support a cleaner playbook
+ - na_ontap_san_create - role updated to support a cleaner playbook
+
+bugfixes:
+ - na_ontap_ndmp - minor documentation changes for restore_vm_cache_size and data_port_range.
+ - na_ontap_qtree - REST API takes "unix_permissions" as parameter instead of "mode".
+ - na_ontap_qtree - unix permission is not available when security style is ntfs
+ - na_ontap_user - minor documentation update for application parameter.
+ - na_ontap_volume - ``efficiency_policy`` was ignored
+ - na_ontap_volume - enforce that space_slo and space_guarantee are mutually exclusive
+  - na_ontap_svm - ``allowed_protocols`` is now added to the parameters correctly when using the REST API
+ - na_ontap_firewall_policy - documentation changed for supported service parameter.
+ - na_ontap_net_subnet - fix ip_ranges option fails on existing subnet.
+ - na_ontap_snapshot_policy - fix vsadmin approach for managing snapshot policy.
+  - na_ontap_nvme_subsystem - fix fetching unique nvme subsystem based on vserver filter.
+  - na_ontap_net_routes - change metric type from string to int.
+  - na_ontap_cifs_server - minor documentation changes, correcting the create example with the "name" parameter and adding type to parameters.
+ - na_ontap_vserver_cifs_security - fix int and boolean options when modifying vserver cifs security.
+ - na_ontap_net_subnet - fix rename idempotency issue and updated rename check.
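The ``use_rest`` behaviour described in the fragment above (Always / Never / Auto) can be driven directly from a playbook. A minimal sketch, using na_ontap_export_policy purely as one example of a REST-enabled module; the connection details are placeholder variables:

    # Hypothetical task: force REST for a module that supports it (use_rest defaults to auto).
    - name: Create an export policy, failing if REST cannot be used
      netapp.ontap.na_ontap_export_policy:
        state: present
        name: demo_policy
        vserver: svm1
        hostname: "{{ ontap_hostname }}"
        username: "{{ ontap_username }}"
        password: "{{ ontap_password }}"
        use_rest: always   # accepted values: always, never, auto (case insensitive as of 20.8.0)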
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml
new file mode 100644
index 00000000..d61b59ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml
@@ -0,0 +1,16 @@
+minor_changes:
+ - na_ontap_cluster - added single node cluster option; also now supports modifying the cluster contact and location options.
+ - na_ontap_info - now allows you to use vsadmin to get info (you must use the ``vserver`` option).
+ - na_ontap_info - Added ``vscan_status_info``, ``vscan_scanner_pool_info``, ``vscan_connection_status_all_info``, ``vscan_connection_extended_stats_info``
+ - na_ontap_efficiency_policy - ``changelog_threshold_percent`` to set the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+
+### Bug Fixes
+bugfixes:
+ - na_ontap_cluster - autosupport log is pushed after cluster create is performed; removed license add or remove option.
+ - na_ontap_dns - report error if modify or delete operations are attempted on cserver when using REST. Make create operation idempotent for cserver when using REST. Support for modify/delete on cserver when using REST will be added later.
+ - na_ontap_firewall_policy - portmap added as a valid service
+ - na_ontap_net_routes - REST does not support the ``metric`` attribute
+ - na_ontap_snapmirror - added ``initialize`` boolean option which specifies whether to initialize the SnapMirror relationship.
+ - na_ontap_volume - fixed error when deleting flexGroup volume with ONTAP 9.7.
+ - na_ontap_volume - tiering option requires 9.4 or later (error on volume-comp-aggr-attributes)
+ - na_ontap_vscan_scanner_pool - fix module only gets one scanner pool.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml
new file mode 100644
index 00000000..ce431b9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml
@@ -0,0 +1,20 @@
+minor_changes:
+ - na_ontap_aggregate - add ``snaplock_type``.
+ - na_ontap_info - New info subsets added ``cifs_server_info``, ``cifs_share_info``, ``cifs_vserver_security_info``, ``cluster_peer_info``, ``clock_info``, ``export_policy_info``, ``export_rule_info``, ``fcp_adapter_info``, ``fcp_alias_info``, ``fcp_service_info``, ``job_schedule_cron_info``, ``kerberos_realm_info``, ``ldap_client``, ``ldap_config``, ``net_failover_group_info``, ``net_firewall_info``, ``net_ipspaces_info``, ``net_port_broadcast_domain_info``, ``net_routes_info``, ``net_vlan_info``, ``nfs_info``, ``ntfs_dacl_info``, ``ntfs_sd_info``, ``ntp_server_info``, ``role_info``, ``service_processor_network_info``, ``sis_policy_info``, ``snapmirror_policy_info``, ``snapshot_policy_info``, ``vscan_info``, ``vserver_peer_info``
+ - na_ontap_igroup_initiator - ``force_remove`` to forcibly remove initiators from an igroup that is currently mapped to a LUN.
+ - na_ontap_interface - ``failover_group`` to specify the failover group for the LIF. ``is_ipv4_link_local`` to specify that the LIFs are to acquire an IPv4 link-local address.
+ - na_ontap_rest_cli - add OPTIONS as a supported verb and return list of allowed verbs.
+ - na_ontap_volume - add ``group_id`` and ``user_id``.
+ - na_ontap_dns - added REST support for dns creation and modification on cluster vserver.
+
+bugfixes:
+ - na_ontap_aggregate - Fixed traceback when running as vsadmin; now cleanly errors out.
+ - na_ontap_command - stdout_lines_filter contains data only if the include/exclude_lines parameter is used. (zeten30)
+ - na_ontap_command - stripped_line len is checked only once, filters are inside if block. (zeten30)
+ - na_ontap_interface - allow module to run on node before joining the cluster.
+ - na_ontap_net_ifgrp - Fixed error for na_ontap_net_ifgrp if no port is given.
+ - na_ontap_snapmirror - Fixed traceback when running as vsadmin. Do not attempt to break a relationship that is 'Uninitialized'.
+ - na_ontap_snapshot_policy - Fixed KeyError on ``prefix`` issue when prefix parameter isn't supplied.
+ - na_ontap_volume - Fixed error reporting if efficiency policy cannot be read. Do not attempt to read efficiency policy if not needed.
+ - na_ontap_volume - Fixed error when modifying volume efficiency policy.
+ - na_ontap_volume_clone - Fixed KeyError exception on ``volume``
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml
new file mode 100644
index 00000000..db7b4073
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml
@@ -0,0 +1,17 @@
+minor_changes:
+ - na_ontap_info - New info subset added ``snapshot_info``
+ - na_ontap_info - ``max_records`` option to set maximum number of records to return per subset.
+ - na_ontap_snapmirror - ``relationship_state`` option for breaking the snapmirror relationship.
+ - na_ontap_snapmirror - ``update_snapmirror`` option for updating the snapmirror relationship.
+ - na_ontap_volume_clone - ``split`` option to split clone volume from parent volume.
+ - na_ontap_nas_create - role - fix typo in README file, add CIFS example.
+
+bugfixes:
+ - na_ontap_cifs_server - Fixed KeyError exception on 'cifs_server_name'
+ - na_ontap_command - fixed traceback when using return_dict if u'1' is present in result value.
+ - na_ontap_login_messages - Fixed example documentation and spelling mistake issue
+ - na_ontap_nvme_subsystem - fixed bug when creating subsystem, vserver was not filtered.
+ - na_ontap_svm - if snapshot policy is changed, modify fails with "Extra input - snapshot_policy"
+ - na_ontap_svm - if language C.UTF-8 is specified, the module is not idempotent
+ - na_ontap_volume_clone - fixed 'Extra input - parent-vserver' error when running as cluster admin.
+ - na_ontap_qtree - Fixed issue with Get function for REST
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml
new file mode 100644
index 00000000..4d63f0e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml
@@ -0,0 +1,8 @@
+minor_changes:
+ - na_ontap_info - New info subset added ``cluster_identity_info``
+ - na_ontap_info - New info subset added ``storage_bridge_info``
+ - na_ontap_snapmirror - performs resync when the ``relationship_state`` is active and the current state is broken-off.
+
+bugfixes:
+ - na_ontap_vscan_scanner_pool - has been updated to match the standard format used for all other ontap modules
+ - na_ontap_volume_snaplock - Fixed KeyError exception on 'is-volume-append-mode-enabled'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml
new file mode 100644
index 00000000..03186163
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml
@@ -0,0 +1,30 @@
+minor_changes:
+ - na_ontap_aggregate - ``disk_count`` option allows adding additional disks to an aggregate.
+ - na_ontap_info - ``max_records`` option specifies maximum number of records returned in a single ZAPI call.
+ - na_ontap_info - ``summary`` option specifies a boolean flag to control return all or none of the info attributes.
+ - na_ontap_info - new fact - iscsi_service_info.
+ - na_ontap_info - new fact - license_info.
+ - na_ontap_info - new fact - metrocluster_info.
+ - na_ontap_info - new fact - metrocluster_check_info.
+ - na_ontap_info - new fact - metrocluster_node_info.
+ - na_ontap_info - new fact - net_interface_service_policy_info.
+ - na_ontap_info - new fact - ontap_system_version.
+ - na_ontap_info - new fact - ontapi_version (and deprecate ontap_version, both fields are reported for now).
+ - na_ontap_info - new fact - qtree_info.
+ - na_ontap_info - new fact - quota_report_info.
+ - na_ontap_info - new fact - snapmirror_destination_info.
+ - na_ontap_interface - ``service_policy`` option to identify a single service or a list of services that will use a LIF.
+ - na_ontap_kerberos_realm - ``ad_server_ip`` option specifies IP Address of the Active Directory Domain Controller (DC).
+ - na_ontap_kerberos_realm - ``ad_server_name`` option specifies Host name of the Active Directory Domain Controller (DC).
+ - na_ontap_snapmirror_policy - REST is included and all defaults are removed from options.
+ - na_ontap_snapmirror - ``relationship-info-only`` option allows managing relationship information.
+ - na_ontap_software_update - ``download_only`` option allows downloading the cluster image without performing the software update.
+ - na_ontap_volume - ``snapshot_auto_delete`` option allows managing auto delete settings of a specified volume.
+
+bugfixes:
+ - na_ontap_cifs_server - delete AD account if username and password are provided when state=absent
+ - na_ontap_info - return all records of each gathered subset.
+ - na_ontap_info - cifs_server_info - fix KeyError exception on ``domain`` if only ``domain-workgroup`` is present.
+ - na_ontap_iscsi_security - Fixed modify functionality for CHAP and corrected a typo.
+ - na_ontap_kerberos_realm - fix ``kdc_vendor`` case sensitivity issue.
+ - na_ontap_snapmirror - calling quiesce before snapmirror break.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml
new file mode 100644
index 00000000..3c772aec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml
@@ -0,0 +1,10 @@
+minor_changes:
+ - na_ontap_firmware_upgrade - ``force_disruptive_update`` and ``package_url`` options allow making choices for downloading and upgrading packages.
+ - na_ontap_autosupport_invoke - added REST support for sending autosupport message.
+ - na_ontap_vserver_create has a new default variable ``netapp_version`` set to 140.
+ If you are running ONTAP 9.2 or below, please add the variable to your playbook and set it to 120 (see the sketch after this fragment).
+
+bugfixes:
+ - na_ontap_volume - ``volume_security_style`` option now allows modify.
+ - na_ontap_info - ``metrocluster_check_info`` has been removed as it was breaking the info module for everyone who didn't have a metrocluster set up.
+ We are working on adding this back in a future update.
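A minimal sketch of overriding ``netapp_version`` when invoking the na_ontap_vserver_create role, assuming the role is called from a play like the one below (the other variables the role needs for vserver creation are omitted and would have to be supplied):

```
- hosts: localhost
  gather_facts: false
  vars:
    netapp_version: 120   # override the default of 140 when running ONTAP 9.2 or earlier
    # ... plus the cluster credentials and vserver details the role expects
  roles:
    - netapp.ontap.na_ontap_vserver_create
```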
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml
new file mode 100644
index 00000000..7ea16526
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml
@@ -0,0 +1,53 @@
+minor_changes:
+ - na_ontap_aggregate - ``raid_type`` option supports 'raid_0' for ONTAP Select.
+ - na_ontap_cluster_peer - ``encryption_protocol_proposed`` option allows specifying the encryption protocol to be used for inter-cluster communication.
+ - na_ontap_info - new fact - aggr_efficiency_info.
+ - na_ontap_info - new fact - cluster_switch_info.
+ - na_ontap_info - new fact - disk_info.
+ - na_ontap_info - new fact - env_sensors_info.
+ - na_ontap_info - new fact - net_dev_discovery_info.
+ - na_ontap_info - new fact - service_processor_info.
+ - na_ontap_info - new fact - shelf_info.
+ - na_ontap_info - new fact - sis_info.
+ - na_ontap_info - new fact - subsys_health_info.
+ - na_ontap_info - new fact - sysconfig_info.
+ - na_ontap_info - new fact - sys_cluster_alerts.
+ - na_ontap_info - new fact - volume_move_target_aggr_info.
+ - na_ontap_info - new fact - volume_space_info.
+ - na_ontap_nvme_namespace - ``block_size`` option allows specifying size in bytes of a logical block.
+ - na_ontap_snapmirror - snapmirror now supports the resume feature.
+ - na_ontap_volume - ``cutover_action`` option allows specifying the action to be taken for cutover.
+ - na_ontap_cluster_config - role - Port Flowcontrol and autonegotiate can be set in role
+
+bugfixes:
+ - REST API call now honors the ``http_port`` parameter.
+ - REST API detection now works with vserver (use_rest - Auto).
+ - na_ontap_autosupport_invoke - when using ZAPI and name is not given, send autosupport message to all nodes in the cluster.
+ - na_ontap_cg_snapshot - properly states it does not support check_mode.
+ - na_ontap_cluster - ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster.
+ - na_ontap_cluster_ha - support check_mode.
+ - na_ontap_cluster_peer - support check_mode.
+ - na_ontap_cluster_peer - EMS log wrongly uses destination credentials with source hostname.
+ - na_ontap_disks - support check_mode.
+ - na_ontap_dns - support check_mode.
+ - na_ontap_efficiency_policy - change ``duration`` type from int to str to support '-' input.
+ - na_ontap_fcp - support check_mode.
+ - na_ontap_flexcache - support check_mode.
+ - na_ontap_info - `metrocluster_check_info` does not trigger a traceback but adds an "error" info element if the target system is not set up for metrocluster.
+ - na_ontap_license - support check_mode.
+ - na_ontap_login_messages - fix documentation link.
+ - na_ontap_node - support check mode.
+ - na_ontap_ntfs_sd - documentation string update for examples; also made sure owner or group is not mandatory.
+ - na_ontap_ports - now support check mode.
+ - na_ontap_restit - error can be a string in addition to a dict. This fix removes a traceback with AttributeError.
+ - na_ontap_routes - support Check Mode correctly.
+ - na_ontap_snapmirror - support check_mode.
+ - na_ontap_software_update - Incorrectly stated that it supports check_mode; it does not.
+ - na_ontap_svm_options - support check_mode.
+ - na_ontap_volume - improve error reporting if required parameter is present but not set.
+ - na_ontap_volume - suppress traceback in wait_for_completion as volume may not be completely ready.
+ - na_ontap_volume - fix KeyError on 'style' when volume is offline.
+ - na_ontap_volume_autosize - Support check_mode when `reset` option is given.
+ - na_ontap_volume_snaplock - fix documentation link.
+ - na_ontap_vserver_peer - support check_mode.
+ - na_ontap_vserver_peer - EMS log wrongly uses destination credentials with source hostname.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml
new file mode 100644
index 00000000..792ab614
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml
@@ -0,0 +1,37 @@
+minor_changes:
+ - na_ontap_disks - ``disk_type`` option allows assigning a specified type of disk.
+ - na_ontap_firmware_upgrade - ignore timeout when downloading image unless ``fail_on_502_error`` is set to true.
+ - na_ontap_info - ``desired_attributes`` advanced feature to select which fields to return.
+ - na_ontap_info - ``use_native_zapi_tags`` to disable the conversion of '_' to '-' for attribute keys.
+ - na_ontap_rest_info - ``fields`` options to request specific fields from subset.
+ - na_ontap_software_update - ``stabilize_minutes`` option specifies number of minutes needed to stabilize node before update.
+ - na_ontap_snapmirror - now performs restore with the optional field ``source_snapshot`` for a specific snapshot, or uses the latest.
+ - na_ontap_ucadapter - ``pair_adapters`` option allows specifying the list of adapters which also need to be offline.
+ - na_ontap_user - ``authentication_password`` option specifies password for the authentication protocol of SNMPv3 user.
+ - na_ontap_user - ``authentication_protocol`` option specifies authentication protocol for SNMPv3 user.
+ - na_ontap_user - ``engine_id`` option specifies authoritative entity's EngineID for the SNMPv3 user.
+ - na_ontap_user - ``privacy_password`` option specifies password for the privacy protocol of SNMPv3 user.
+ - na_ontap_user - ``privacy_protocol`` option specifies privacy protocol of SNMPv3 user.
+ - na_ontap_user - ``remote_switch_ipaddress`` option specifies the IP Address of the remote switch of SNMPv3 user.
+ - na_ontap_volume - ``check_interval`` option checks if a volume move has been completed and then waits this number of seconds before checking again.
+ - na_ontap_volume - ``auto_remap_luns`` option controls automatic mapping of LUNs during volume rehost.
+ - na_ontap_volume - ``force_restore`` option forces volume to restore even if the volume has one or more newer Snapshot copies.
+ - na_ontap_volume - ``force_unmap_luns`` option controls automatic unmapping of LUNs during volume rehost.
+ - na_ontap_volume - ``from_vserver`` option allows volume rehost from one vserver to another.
+ - na_ontap_volume - ``preserve_lun_ids`` option controls whether LUNs in the volume being restored remain mapped and their identities preserved.
+ - na_ontap_volume - ``snapshot_restore`` option specifies name of snapshot to restore from.
+ - all modules - ``cert_filepath``, ``key_filepath`` to enable SSL certificate authentication (python 2.7 or 3.x).
+ - all modules - SSL certificate authentication in addition to username/password (python 2.7 or 3.x).
+ - na_ontap_pb_install_SSL_certificate.yml - playbook example - installing a self-signed SSL certificate, and enabling SSL certificate authentication.
+ - na_ontap_user - added REST support for ONTAP user creation, modification & deletion.
+
+bugfixes:
+ - na_ontap_firmware_upgrade - ignore timeout when downloading firmware images by default.
+ - na_ontap_info - conversion from '-' to '_' was not done for lists of dictionaries.
+ - na_ontap_ntfs_dacl - example fix in documentation string.
+ - na_ontap_snapmirror - could not delete all rules (bug in netapp_module).
+ - na_ontap_volume - modify was invoked multiple times when once is enough.
+ - na_ontap_volume - fix KeyError on 'style' when volume is of type - data-protection.
+ - na_ontap_volume - `wait_on_completion` is supported with volume moves.
+ - module_utils/netapp_module - cater for empty lists in get_modified_attributes().
+ - module_utils/netapp_module - cater for lists with duplicate elements in compare_lists().
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml
new file mode 100644
index 00000000..572d8499
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml
@@ -0,0 +1,9 @@
+minor_changes:
+- na_ontap_firmware_upgrade - ``reboot_sp`` - reboot service processor before downloading package.
+- na_ontap_firmware_upgrade - ``rename_package`` - rename file when downloading service processor package.
+- na_ontap_firmware_upgrade - ``replace_package`` - replace local file when downloading service processor package.
+
+bugfixes:
+- na_ontap_firmware_upgrade - images are not downloaded, but the module reports success.
+- na_ontap_user - fixed KeyError if password is not provided.
+- na_ontap_password - do not error out if password is identical to previous password (idempotency).
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml
new file mode 100644
index 00000000..7dae6c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml
@@ -0,0 +1,24 @@
+minor_changes:
+ - na_ontap_info - support ``continue_on_error`` option to continue when a ZAPI is not supported on a vserver, or for cluster RPC errors.
+ - na_ontap_info - support ``query`` option to specify which objects to return.
+ - na_ontap_info - support ``vserver`` tunneling to limit output to one vserver (see the sketch after this fragment).
+ - na_ontap_snapmirror_policy - support for SnapMirror policy rules.
+ - na_ontap_vscan_scanner_pool - support modification.
+ - na_ontap_rest_info - Support for gather subsets - ``cluster_node_info, cluster_peer_info, disk_info, cifs_services_info, cifs_share_info``.
+ - module_utils/netapp - add retry on wait_on_job when a job fails. Abort after 3 consecutive errors.
+ - na_ontap_pb_get_online_volumes.yml - example playbook to list volumes that are online (or offline).
+ - na_ontap_pb_install_SSL_certificate_REST.yml - example playbook to install SSL certificates using REST APIs.
+
+bugfixes:
+ - na_ontap_command - replace invalid backspace characters (0x08) with '.'.
+ - na_ontap_firmware_download - exception on PCDATA if ONTAP returns a BEL (0x07) character.
+ - na_ontap_info - lists were incorrectly processed in convert_keys, returning {}.
+ - na_ontap_info - qtree_info is missing most entries. Changed key from `vserver:id` to `vserver:volume:id`.
+ - na_ontap_iscsi_security - adding no_log for password parameters.
+ - na_ontap_portset - adding explicit error message as modify portset is not supported.
+ - na_ontap_snapmirror - fixed snapmirror delete for loadsharing to not go to quiesce state for the rest of the set.
+ - na_ontap_ucadapter - fixed KeyError if type is not provided and mode is 'cna'.
+ - na_ontap_user - checked `applications` does not contain snmp when using REST API call.
+ - na_ontap_user - fixed KeyError if locked key not set with REST API call.
+ - na_ontap_user - fixed KeyError if vserver is empty with REST API call (useful to indicate cluster scope).
+ - na_ontap_volume - fixed KeyError when getting info on a MVD volume
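A minimal sketch of the ``vserver`` tunneling option named above, run with SVM credentials (the address, credentials, and vserver name are placeholders; ``query`` and ``continue_on_error`` are left out since their exact formats are not shown in this fragment):

```
- name: gather volume info for a single vserver, using vsadmin credentials
  netapp.ontap.na_ontap_info:
    hostname: "{{ ontap_svm_admin_ip }}"        # placeholder
    username: vsadmin
    password: "{{ ontap_svm_admin_password }}"  # placeholder
    vserver: svm1                               # tunnel the request to this vserver only
    gather_subset: volume_info
  register: ontap
```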
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml
new file mode 100644
index 00000000..59f05d2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml
@@ -0,0 +1,33 @@
+minor_changes:
+ - na_ontap_aggregate - support ``disk_size_with_unit`` option.
+ - na_ontap_ldap_client - support ``ad_domain`` and ``preferred_ad_server`` options.
+ - na_ontap_rest_info - Support for gather subsets - ``cloud_targets_info, cluster_chassis_info, cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, svm_nis_config_info, svm_peers_info, svm_peer-permissions_info``.
+ - na_ontap_rest_info - Support for gather subsets for 9.8+ - ``cluster_metrocluster_diagnostics``.
+ - na_ontap_qtree - ``force_delete`` option with a DEFAULT of ``true`` so that ZAPI behavior is aligned with REST.
+ - na_ontap_security_certificates - ``ignore_name_if_not_supported`` option to not fail if ``name`` is present since ``name`` is not supported in ONTAP 9.6 and 9.7.
+ - na_ontap_software_update - added ``timeout`` option to give enough time for the update to complete.
+ - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+ - add ``type:`` and ``elements:`` information where missing.
+ - update ``required:`` information.
+
+bugfixes:
+ - na_ontap_aggregate - ``disk-info`` error when using ``disks`` option.
+ - na_ontap_autosupport_invoke - ``message`` has changed to ``autosupport_message`` as Red Hat has reserved this word. ``message`` has been aliased to ``autosupport_message``.
+ - na_ontap_cifs_vserver - fix documentation and add more examples.
+ - na_ontap_cluster - module was not idempotent when changing location or contact information.
+ - na_ontap_igroup - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+ - na_ontap_igroup_initiator - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+ - na_ontap_security_certificates - allows (``common_name``, ``type``) as an alternate key since ``name`` is not supported in ONTAP 9.6 and 9.7.
+ - na_ontap_info - Fixed error causing module to fail on ``metrocluster_check_info``, ``env_sensors_info`` and ``volume_move_target_aggr_info``.
+ - na_ontap_snapmirror - fixed KeyError when accessing ``relationship_type`` parameter.
+ - na_ontap_snapmirror_policy - fixed a race condition when creating a new policy.
+ - na_ontap_snapmirror_policy - fixed idempotency issue with ``is_network_compression_enabled`` for REST.
+ - na_ontap_software_update - ignore connection errors during update as nodes may not be reachable.
+ - na_ontap_user - enable lock state and password to be set in the same task for existing user.
+ - na_ontap_volume - issue when snapdir_access and atime_update are not passed together.
+ - na_ontap_vscan_on_access_policy - ``bool`` type was not properly set for ``scan_files_with_no_ext``.
+ - na_ontap_vscan_on_access_policy - ``policy_status`` enable/disable option was not supported.
+ - na_ontap_vscan_on_demand_task - ``file_ext_to_include`` was not handled properly.
+ - na_ontap_vscan_scanner_pool_policy - scanner_pool apply policy support on modification.
+ - na_ontap_vserver_create(role) - lif creation now defaults to system-defined unless the lif type is iscsi.
+ - use_rest is now case insensitive.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml
new file mode 100644
index 00000000..2315af8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml
@@ -0,0 +1,17 @@
+minor_changes:
+ - na_ontap_cluster - ``node_name`` to set the node name when adding a node, or as an alternative to ``cluster_ip_address`` to remove a node.
+ - na_ontap_cluster - ``state`` can be set to ``absent`` to remove a node identified with ``cluster_ip_address`` or ``node_name`` (see the sketch after this fragment).
+ - na_ontap_qtree - ``wait_for_completion`` and ``time_out`` to wait for qtree deletion when using REST.
+ - na_ontap_quotas - ``soft_disk_limit`` and ``soft_file_limit`` for the quota target.
+ - na_ontap_rest_info - Support for gather subsets - ``initiator_groups_info, san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, storage_luns_info, storage_NVMe_namespaces``.
+
+bugfixes:
+ - na_ontap_cluster - ``check_mode`` is now working properly.
+ - na_ontap_interface - ``home_node`` is not required in pre-cluster mode.
+ - na_ontap_interface - ``role`` is not required if ``service_policy`` is present and ONTAP version is 9.8.
+ - na_ontap_interface - traceback in get_interface if node is not reachable.
+ - na_ontap_job_schedule - allow ``job_minutes`` to set number to -1 for job creation with REST too.
+ - na_ontap_qtree - fixed ``None is not subscriptable`` exception on rename operation.
+ - na_ontap_volume - fixed ``KeyError`` exception on ``size`` when reporting creation error.
+ - na_ontap_* - change version_added from '2.6' to '2.6.0' where applicable to satisfy sanity checker.
+ - netapp.py - uncaught exception (traceback) on zapi.NaApiError.
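A minimal sketch of removing a node with the ``state`` and ``node_name`` options described above (the node name and credential variables are placeholders; ``cluster_ip_address`` could be used instead of ``node_name``):

```
- name: remove a node from the cluster
  netapp.ontap.na_ontap_cluster:
    state: absent
    node_name: cluster-01-02   # placeholder; alternatively identify the node with cluster_ip_address
    hostname: "{{ ontap_admin_ip }}"
    username: "{{ ontap_admin_username }}"
    password: "{{ ontap_admin_password }}"
```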
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml
new file mode 100644
index 00000000..1981166e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_snapmirror_policy - report error when attempting to change ``policy_type`` rather than taking no action.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml
new file mode 100644
index 00000000..2af3377e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_volume - fix volume type modify issue by reporting error.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml
new file mode 100644
index 00000000..bf01eaaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_info - New options ``cifs_options_info``, ``cluster_log_forwarding_info``, ``event_notification_destination_info``, ``event_notification_info``, ``security_login_role_config_info``, ``security_login_role_info`` have been added.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml
new file mode 100644
index 00000000..6d86e908
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_cluster_peer - optional parameter ``ipspace`` added for cluster peer.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml
new file mode 100644
index 00000000..bcefabb0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_cluster - ``single_node_cluster`` option was ignored.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml
new file mode 100644
index 00000000..f32d6691
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_aggregate - support concurrent actions for rename/modify/add_object_store and create/add_object_store.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml
new file mode 100644
index 00000000..0f09e24f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml
@@ -0,0 +1,6 @@
+minor_changes:
+ - na_ontap_cifs - output ``modified`` if a modify action is taken.
+ - na_ontap_svm - output ``modified`` if a modify action is taken.
+
+bugfixes:
+ - na_ontap_cifs - fix idempotency issue when ``show-previous-versions`` is used.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml
new file mode 100644
index 00000000..0888400e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_volume - ``encrypt`` with a value of ``false`` is ignored when creating a volume.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml
new file mode 100644
index 00000000..585f302b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_rest_info - Support for gather subsets - ``application_info, application_template_info, autosupport_config_info, autosupport_messages_history, ontap_system_version, storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, support_ems_filters``
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml
new file mode 100644
index 00000000..dbb99ad4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_quotas - New option ``activate_quota_on_change`` to resize or reinitialize quotas.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml
new file mode 100644
index 00000000..34299bfe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_info - better reporting on KeyError traceback, option to ignore error.
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml
new file mode 100644
index 00000000..94dd3585
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_info - KeyError on ``tree`` for quota_report_info.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml
new file mode 100644
index 00000000..067c2d18
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml
@@ -0,0 +1,6 @@
+minor_changes:
+ - na_ontap_mcc_mediator - improve error reporting when REST is not available.
+ - na_ontap_metrocluster - improve error reporting when REST is not available.
+ - na_ontap_wwpn_alias - improve error reporting when REST is not available.
+bugfixes:
+ - na_ontap_ipspace - invalid call in error reporting (double error).
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml
new file mode 100644
index 00000000..3e9add07
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml
@@ -0,0 +1,5 @@
+minor_changes:
+ - na_ontap_software_update - add `force_update` option to ignore current version.
+
+bugfixes:
+ - na_ontap_software_update - module is not idempotent.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml
new file mode 100644
index 00000000..bfef391b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml
@@ -0,0 +1,5 @@
+minor_changes:
+ - na_ontap_interface - minor example update.
+ - na_ontap_export_policy_rule - minor doc updates.
+bugfixes:
+ - na_ontap_info - Use ``node-id`` as key rather than ``current-version``.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml
new file mode 100644
index 00000000..e38bfeef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_firmware_upgrade - fix ValueError issue when processing URL error.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml
new file mode 100644
index 00000000..dacc27c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_svm - warning for ``aggr_list`` wildcard value (``*``) in create idempotency.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml
new file mode 100644
index 00000000..51e0444f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_broadcast_domain_ports - handle ``changed`` for check_mode and report correctly.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml
new file mode 100644
index 00000000..f1636604
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - All REST modules - will not fail if a job fails
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml
new file mode 100644
index 00000000..c1214c15
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_info - do not require write access privileges. This also enables other modules to work in check_mode without write access permissions.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml
new file mode 100644
index 00000000..ffd6382a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_lun - support modify for space_allocation and space_reserve.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml
new file mode 100644
index 00000000..2a98711b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_lun - new option ``from_name`` to rename a LUN.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml
new file mode 100644
index 00000000..18e2dd26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - na_ontap_volume - ``nas_application_template`` to create a volume using nas application REST API.
+ - na_ontap_volume - ``size_change_threshold`` to ignore small changes in volume size.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml
new file mode 100644
index 00000000..7193ea40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_ontap_igroup - new option ``os_type`` to replace ``ostype`` (but ostype is still accepted).
+ - na_ontap_lun - new option ``os_type`` to replace ``ostype`` (but ostype is still accepted), and removed default to ``image``.
+ - na_ontap_lun - new option ``san_application_template`` to create LUNs without explicitly creating a volume and using REST APIs.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml
new file mode 100644
index 00000000..78b525b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - na_ontap_volume - ``compression`` to enable compression on a FAS volume.
+ - na_ontap_volume - ``inline-compression`` to enable inline compression on a volume.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml
new file mode 100644
index 00000000..2ce1895f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - all ZAPI modules - optimize Basic Authentication by adding Authorization header proactively.
+ - all ZAPI modules - new ``classic_basic_authorization`` feature_flag to disable adding Authorization header proactively.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml
new file mode 100644
index 00000000..999c5c74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_user - application expects only ``service_processor`` but module supports ``service-processor``.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml
new file mode 100644
index 00000000..735e0371
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_qos_policy_group - new option ``is_shared`` to control whether QoS SLOs are shared or not.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml
new file mode 100644
index 00000000..55b4a37e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_lun - new option ``qos_policy_group`` to assign a qos_policy_group to a LUN.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml
new file mode 100644
index 00000000..7b48e095
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_quotas - New option ``perform_user_mapping`` to perform user mapping for the user specified in quota-target.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml
new file mode 100644
index 00000000..27199ea4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_quota_policy - new option ``auto_assign`` to assign quota policy to vserver.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml
new file mode 100644
index 00000000..ed3cb6ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_cifs - fix for AttributeError - 'NoneType' object has no attribute 'get' on line 300
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml
new file mode 100644
index 00000000..6c5adf7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_ontap_volume - checking for success before failure led to 'NoneType' object has no attribute 'get_child_by_name' when modifying a Flexcache volume.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml
new file mode 100644
index 00000000..e9e292f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_rest_info - Support for gather subsets - ``cifs_home_directory_info, cluster_software_download, event_notification_info, event_notification_destination_info, security_login_info, security_login_rest_role_info``
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml
new file mode 100644
index 00000000..babca44d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_volume - ``sizing_method`` to resize a FlexGroup using REST.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml
new file mode 100644
index 00000000..14f15a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_ontap_lun - ``use_exact_size`` to create a LUN with the exact given size so that the LUN is not rounded up.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/README.md
new file mode 100644
index 00000000..59fbcb60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/README.md
@@ -0,0 +1,37 @@
+=============================================================
+
+ netapp.ontap
+
+ NetApp ONTAP Collection
+
+ Copyright (c) 2020 NetApp, Inc. All rights reserved.
+ Specifications subject to change without notice.
+
+=============================================================
+# Playbook examples
+
+As the name indicates, these are examples, and while they were working at the time of publication, we do not support these playbooks.
+We cannot guarantee they will work on other systems, other configurations, or other versions than what we used at the time.
+We will not maintain these playbooks as time passes.
+
+## ONTAP Firmware Updates
+
+By default, downloading a firmware image is enough to trigger an update.
+The update happens automatically in the background for the disk qualification package and for disk, shelf, and ACP firmware. It is designed to be non-disruptive.
+
+The SP firmware will be automatically installed, but requires a node reboot. The reboot is not done in these playbooks.
+
+The na_ontap_pb_upgrade_firmware playbooks illustrate three ways to use variables in an Ansible playbook:
+1. directly inside the playbook, under the `vars:` keyword
+1. by importing an external file, under the `vars_files:` keyword
+1. by adding `--extra-vars` to the `ansible-playbook` command line. Using `@` lets you point to a file rather than providing each variable explicitly.
+
+```
+ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml
+
+ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml
+
+ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml --extra-vars=@/tmp/ansible/ontap_vars_file.yml
+```
+
+The advantage of using a vars_file is that you can keep important variables private. --extra-vars provides more flexibility regarding the location of the vars file.
\ No newline at end of file
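A sketch of what such a vars file could contain, assuming the firmware playbooks use the same connection variables as the other examples in this collection (variable names are illustrative; check the playbook's `vars:` section for the exact names it expects):

```
# contents of /tmp/ansible/ontap_vars_file.yml (illustrative)
ontap_admin_ip: 10.0.0.10
ontap_admin_username: admin
ontap_admin_password: XXXXXXXX   # placeholder, ideally vault-encrypted
ontap_use_https: true
ontap_validate_certs: false
```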
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md
new file mode 100644
index 00000000..0d3321af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md
@@ -0,0 +1,30 @@
+=============================================================
+
+ netapp.ontap
+
+ NetApp ONTAP Collection
+
+ Copyright (c) 2020 NetApp, Inc. All rights reserved.
+ Specifications subject to change without notice.
+
+=============================================================
+# Playbook examples
+
+As the name indicates, these are examples, and while they were working at the time of publication, we do not support these playbooks.
+We cannot guarantee they will work on other systems, other configurations, or other versions than what we used at the time.
+We will not maintain these playbooks as time passes.
+
+## ONTAP list volumes that are online, or offline
+
+The na_ontap_pb_get_online_volumes playbook illustrates two ways to use json_query:
+1. to flatten a complex structure and extract only the fields of interest,
+2. to filter the fields of interest based on some criteria.
+
+The na_ontap_pb_get_online_volumes playbook illustrates three ways to use variables in an Ansible playbook:
+1. directly inside the playbook, under the `vars:` keyword,
+1. by importing an external file, under the `vars_files:` keyword,
+1. by adding `--extra-vars` to the `ansible-playbook` command line. Using `@` lets you point to a file rather than providing each variable explicitly.
+
+Note that `--extra-vars` has the highest precedence. `vars` has the lowest precedence. It is possible to combine the 3 techniques within a single playbook.
+
+The advantage of using a vars_file is that you can keep important variables private. --extra-vars provides more flexibility regarding the location of the vars file.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml
new file mode 100644
index 00000000..70c242e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml
@@ -0,0 +1,76 @@
+-
+ name: Get list of online ONTAP volumes
+ hosts: localhost
+ gather_facts: no
+ collections:
+ - netapp.ontap
+ vars_files:
+ # This will fail silently if the vars_file is not found. Remove '/dev/null' to force an error
+ # if --extra-vars is used to provide values for these variables, the values from vars_file are ignored
+ - ['/path/to/ontap_vars_file.yml', '/dev/null']
+
+ vars:
+ # TODO: change these values until DONE, unless a vars file or --extra-vars is used.
+ # If --extra-vars is used to provide values for these variables, the values below are ignored.
+ # If vars_files is used, the values below are ignored.
+ ontap_admin_ip: TBD
+ # username/password authentication
+ ontap_admin_username: admin
+ ontap_admin_password: TBD
+ # SSL certificate authentication
+ ontap_cert_filepath: "/path/to/test.pem"
+ ontap_key_filepath: "/path/to/test.key"
+ # optional, SVM login
+ ontap_svm_admin_ip: TBD
+ ontap_svm_admin_username: vsadmin
+ ontap_svm_admin_password: TBD
+ # we recommend using https, with a valid certificate
+ ontap_use_https: true
+ ontap_validate_certs: false
+ # DONE
+ login: &login
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+ cert_login: &cert_login
+ hostname: "{{ ontap_admin_ip }}"
+ cert_filepath: "{{ ontap_cert_filepath }}"
+ key_filepath: "{{ ontap_key_filepath }}"
+ https: true # ignored, as https is required for SSL
+ validate_certs: "{{ ontap_validate_certs }}"
+ svm_login: &svm_login
+ hostname: "{{ ontap_svm_admin_ip }}"
+ username: "{{ ontap_svm_admin_username }}"
+ password: "{{ ontap_svm_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+ tasks:
+ - name: collect list of volumes, and state information
+ na_ontap_info:
+ <<: *cert_login
+ gather_subset: volume_info
+ desired_attributes:
+ volume-attributes:
+ volume-state-attributes:
+ state:
+ use_native_zapi_tags: false
+ register: ontap
+ - debug: var=ontap
+ tags: never
+ - set_fact:
+ volumes: "{{ ontap.ontap_info | json_query(get_attrs) }}"
+ vars:
+ get_attrs: "volume_info.*.{id: volume_id_attributes.name, svm: volume_id_attributes.owning_vserver_name, state: volume_state_attributes.state}"
+ - debug: var=volumes
+ - set_fact:
+ online_volumes: "{{ volumes | json_query(get_online) }}"
+ vars:
+ get_online: "[? state=='online']"
+ - debug: var=online_volumes
+ - set_fact:
+ offline_volumes: "{{ volumes | json_query(get_offline) }}"
+ vars:
+ get_offline: "[? state=='offline']"
+ - debug: var=offline_volumes
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml
new file mode 100644
index 00000000..a6221e84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml
@@ -0,0 +1,209 @@
+# Example of installing an SSL certificate in ONTAP for authentication
+# This playbook:
+# 1. installs the certificate, or proceeds if the certificate is already installed,
+# 2. enables SSL client authentication,
+# 3. creates user account for cert authentication for ontapi and http applications,
+# 4. validates that cert authentication works
+#
+# in test mode (using tags: -t all,testpb):
+# 1b. the installation is repeated, to validate the check for idempotency (certificate already installed),
+# 5. user account for cert authentication for ontapi and http applications is deleted,
+# 6. if the certificate was installed in step 1, it is deleted.
+# The certificate can be manually deleted using something like:
+# security certificate delete -vserver trident_svm -common-name cert_user -ca cert_user -type *
+#
+# Prerequisites:
+# you must have generated a certificate and have the certificate file (.pem) and the private key file available.
+# This was tested using a self-signed certificate:
+# https://netapp.io/2016/11/08/certificate-based-authentication-netapp-manageability-sdk-ontap/
+-
+ name: Ontap Install SSL certificate and enable SSL certificate authentication
+ hosts: localhost
+ gather_facts: no
+ collections:
+ - netapp.ontap
+ vars:
+ # TODO: change these variable values from HERE to DONE:
+ ontap_admin_ip: 10.XXX.XXX.X19
+ ontap_admin_username: admin
+ ontap_admin_password: XXXXXXXX
+ # we recommend using https, but it requires a valid SSL certificate
+ ontap_use_https: true
+ ontap_validate_certs: false
+
+ # parameters to set up the certificate, ontap_cert_user must match the value of CN= when generating the certificate
+ ontap_cert_user: cert_user
+ ontap_cert_name: deleteme_cert
+ # admin or vsadmin
+ ontap_cert_role: vsadmin
+ # admin or data SVM
+ vserver: trident_svm
+ # admin or SVM IP address (for admin, would be the same as ontap_admin_ip)
+ ontap_svm_ip: 10.XXX.XXX.X21
+ # certificate and private key files
+ cert_filepath: "/home/laurentn/atelier/ansible_wsl/ansible-playbooks/test.pem"
+ key_filepath: "/home/laurentn/atelier/ansible_wsl/ansible-playbooks/test.key"
+ # set this to false if the certificate is self-signed
+ validate_certs_for_ssl_auth: false
+
+ # you can either copy/paste the certificate(s) from the pem file, respecting the indentation:
+ ssl_certificate_inline: |
+ -----BEGIN CERTIFICATE-----
+ MXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxx==
+ -----END CERTIFICATE-----
+
+ # or read it directly from the pem file
+ ssl_certificate_from_file: "{{lookup('file', cert_filepath)}}"
+
+ # pick one:
+ # ssl_certificate: "{{ ssl_certificate_inline }}"
+ ssl_certificate: "{{ ssl_certificate_from_file }}"
+
+ # DONE - do not change anything else (unless you really want to)
+
+ # this will be used to authenticate using SSL certificate
+ cert_login: &cert_login
+ hostname: "{{ ontap_svm_ip }}"
+ cert_filepath: "{{ cert_filepath }}"
+ key_filepath: "{{ key_filepath }}"
+ https: true
+ validate_certs: "{{ validate_certs_for_ssl_auth }}"
+
+ login: &login
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+
+ tasks:
+ - name: run ontap info module to check connectivity
+ na_ontap_info:
+ <<: *login
+ gather_subset: ontap_system_version
+ register: ontap
+ - debug: var=ontap.ontap_info.ontap_version
+
+ - name: use ZAPIT to install certificate
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ security-certificate-install:
+ cert-name: "{{ ontap_cert_name }}"
+ certificate: "{{ ssl_certificate }}"
+ type: client-ca
+ vserver: "{{ vserver }}"
+ ignore_errors: true
+ register: ontap
+ - debug: var=ontap
+ - fail:
+ msg: "Failed to install certificate: {{ ontap }}"
+ when: ontap.failed and ontap.reason != "duplicate entry"
+ - name: collect certificate data to be able to delete it later when testing
+ tags: never,testpb
+ set_fact:
+ certificate_authority: "{{ ontap.response.ca | default('unknown') }}"
+ serial_number: "{{ ontap.response.serial | default(0) }}"
+ certificate_installed: "{{ not ontap.failed }}"
+ - debug: var=certificate_authority
+ tags: never,testpb
+ - debug: var=serial_number
+ tags: never,testpb
+ - debug: var=certificate_installed
+ tags: never,testpb
+
+ - name: use ZAPIT to install certificate (idempotency)
+ # use -t all,testpb when testing the playbook
+ tags: never,testpb
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ security-certificate-install:
+ cert-name: "{{ ontap_cert_name }}"
+ certificate: "{{ ssl_certificate }}"
+ type: client-ca
+ vserver: "{{ vserver }}"
+ ignore_errors: true
+ register: ontap
+ - debug: var=ontap
+ tags: never,testpb
+ - fail:
+ msg: "Failed to install certificate: {{ ontap }}"
+ tags: never,testpb
+ when: ontap.failed and ontap.reason != "duplicate entry"
+
+ - name: use ZAPIT to enable certificate authentication
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ security-ssl-modify:
+ client-authentication-enabled: true
+ vserver: "{{ vserver }}"
+ register: ontap
+ - debug: var=ontap
+ tags: never,testpb
+
+ - name: set up cert authentication for ontapi (ZAPI) and http (REST)
+ na_ontap_user:
+ <<: *login
+ applications: ontapi,http
+ authentication_method: cert
+ name: "{{ ontap_cert_user }}"
+ role_name: "{{ ontap_cert_role }}"
+ vserver: "{{ vserver }}"
+ register: ontap
+ - debug: var=ontap
+ tags: never,testpb
+
+ - name: validate cert authentication is working for ZAPI
+ na_ontap_info:
+ <<: *cert_login
+ gather_subset: ontap_version
+ register: ontap
+ - debug: var=ontap
+
+ - name: remove cert authentication for ontapi (ZAPI) and http (REST) when testing
+ tags: never,testpb
+ na_ontap_user:
+ <<: *login
+ state: absent
+ applications: ontapi,http
+ authentication_method: cert
+ name: "{{ ontap_cert_user }}"
+ role_name: "{{ ontap_cert_role }}"
+ vserver: "{{ vserver }}"
+ register: ontap
+ - debug: var=ontap
+ tags: never,testpb
+
+ - name: use ZAPIT to delete certificate when testing
+ # use -t all,never when testing the playbook
+ tags: never,testpb,delete
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ security-certificate-delete:
+ certificate-authority: "{{ certificate_authority }}"
+ common-name: "{{ certificate_authority }}"
+ serial-number: "{{ serial_number }}"
+ type: client-ca
+ vserver: "{{ vserver }}"
+ when: certificate_installed
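For reference, the certificate authentication this playbook enables boils down to a client TLS context like the sketch below (roughly what OntapZAPICx in plugins/module_utils/netapp.py, further down in this patch, builds); the file paths are placeholders.

import ssl

context = ssl.create_default_context()
context.check_hostname = False        # only because the certificate is self-signed
context.verify_mode = ssl.CERT_NONE   # matches validate_certs_for_ssl_auth: false
context.load_cert_chain('/path/to/test.pem', keyfile='/path/to/test.key')
# the context is then attached to an HTTPS handler used to send ZAPI requests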
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml
new file mode 100644
index 00000000..3aabe0be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml
@@ -0,0 +1,202 @@
+# Example of installing an SSL certificate in ONTAP for authentication
+# This playbook:
+# 1. installs the certificate, or proceeds if the certificate is already installed,
+# (this also enables SSL client authentication),
+# 2. creates user account for cert authentication for ontapi and http applications,
+# 3. validates that cert authentication works
+#
+# in test mode (using tags: -t all,testpb):
+# 1b. the installation is repeated, to validate the check for idempotency (certificate already installed),
+# 4. user account for cert authentication for ontapi and http applications is deleted,
+# 5. if the certificate was installed in step 1, it is deleted.
+# The certificate can be manually deleted using something like:
+# security certificate delete -vserver trident_svm -common-name cert_user -ca cert_user -type *
+#
+# Prerequisites:
+# you must have generated a certificate and have the certificate file (.pem) and the private key file available.
+# This was tested using a self-signed certificate:
+# https://netapp.io/2016/11/08/certificate-based-authentication-netapp-manageability-sdk-ontap/
+-
+ name: Ontap Install SSL certificate and enable SSL certificate authentication
+ hosts: localhost
+ gather_facts: no
+ collections:
+ - netapp.ontap
+ vars:
+ # TODO: change these variable values from HERE to DONE:
+ ontap_admin_ip: 10.xxx.xxx.x19
+ ontap_admin_username: admin
+ ontap_admin_password: xxxxxxxxx
+ # we recommend using https, but it requires a valid SSL certificate
+ ontap_use_https: true
+ ontap_validate_certs: false
+
+ # parameters to set up the certificate, ontap_cert_user must match the value of CN= when generating the certificate
+ ontap_cert_user: cert_user
+ ontap_cert_name: testme-cert
+ # admin or vsadmin
+ ontap_cert_role: vsadmin
+ # data SVM
+ svm: trident_svm
+ # uncomment and leave the value empty for cluster certificate
+ # svm:
+ # admin or SVM IP address (for admin, this would be the same as ontap_admin_ip)
+ ontap_svm_ip: 10.XXX.XXX.X21
+ # certificate and private key files
+ cert_filepath: "/home/laurentn/atelier/ansible_wsl/ansible-playbooks/test.pem"
+ key_filepath: "/home/laurentn/atelier/ansible_wsl/ansible-playbooks/test.key"
+ # set this to false if the certificate is self-signed
+ validate_certs_for_ssl_auth: false
+
+ # you can either copy/paste the certificate(s) from the pem file, respecting the indentation:
+ ssl_certificate_inline: |
+ -----BEGIN CERTIFICATE-----
+ MXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx
+ XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxx==
+ -----END CERTIFICATE-----
+
+ # or read it directly from the pem file
+ ssl_certificate_from_file: "{{lookup('file', cert_filepath)}}"
+
+ # pick one:
+ # ssl_certificate: "{{ ssl_certificate_inline }}"
+ ssl_certificate: "{{ ssl_certificate_from_file }}"
+
+ # DONE - do not change anything else (unless you really want to)
+
+ # this will be used to authenticate using SSL certificate
+ cert_login: &cert_login
+ hostname: "{{ ontap_admin_ip }}"
+ cert_filepath: "{{ cert_filepath }}"
+ key_filepath: "{{ key_filepath }}"
+ https: true
+ validate_certs: "{{ validate_certs_for_ssl_auth }}"
+
+ login: &login
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+
+ tasks:
+ - name: run ontap info module to check connectivity
+ na_ontap_info:
+ <<: *login
+ gather_subset: ontap_system_version
+ register: ontap
+ - debug: var=ontap.ontap_info.ontap_version
+
+ - name: install certificate
+ na_ontap_security_certificates:
+ <<: *login
+ common_name: "{{ ontap_cert_user }}"
+ name: "{{ ontap_cert_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ svm }}"
+ register: result
+ - debug: var=result
+ - assert: { that: result.changed, quiet: True }
+
+ - name: install certificate (idempotency test)
+ # use -t all,testpb when testing the playbook
+ tags: never,testpb
+ na_ontap_security_certificates:
+ <<: *login
+ common_name: "{{ ontap_cert_user }}"
+ name: "{{ ontap_cert_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ svm }}"
+ register: result
+ - debug: var=result
+ tags: never,testpb
+ - assert: { that: not result.changed, quiet: True }
+ tags: never,testpb
+
+ - name: set up cert authentication for ontapi (ZAPI) and http (REST)
+ na_ontap_user:
+ <<: *login
+ applications: ontapi,http
+ authentication_method: cert
+ name: "{{ ontap_cert_user }}"
+ role_name: "{{ ontap_cert_role }}"
+ svm: "{{ svm }}"
+ use_rest: Always
+ register: result
+ - debug: var=result
+ tags: never,testpb
+ - assert: { that: result.changed, quiet: True }
+ tags: never,testpb
+
+ - name: validate cert authentication is working for REST
+ na_ontap_rest_info:
+ <<: *cert_login
+ gather_subset: vserver_info
+ register: result
+ - debug: var=result
+
+ - name: remove cert authentication for ontapi (ZAPI) and http (REST) when testing
+ tags: never,testpb
+ na_ontap_user:
+ <<: *login
+ state: absent
+ applications: ontapi,http
+ authentication_method: cert
+ name: "{{ ontap_cert_user }}"
+ role_name: "{{ ontap_cert_role }}"
+ svm: "{{ svm }}"
+ use_rest: Always
+ register: result
+ - debug: var=result
+ tags: never,testpb
+ - assert: { that: result.changed, quiet: True }
+ tags: never,testpb
+
+ - name: delete certificate when testing
+ # use -t all,never when testing the playbook
+ tags: never,testpb,delete
+ na_ontap_security_certificates:
+ <<: *login
+ common_name: "{{ ontap_cert_user }}"
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ svm }}"
+ state: absent
+ register: result
+ - debug: var=result
+ tags: never,testpb,delete
+ - assert: { that: result.changed, quiet: True }
+ tags: never,testpb,delete
+
+ - name: delete certificate when testing (idempotency)
+ # use -t all,never when testing the playbook
+ tags: never,testpb,delete
+ na_ontap_security_certificates:
+ <<: *login
+ common_name: "{{ ontap_cert_user }}"
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ svm }}"
+ state: absent
+ register: result
+ - debug: var=result
+ tags: never,testpb,delete
+ - assert: { that: not result.changed, quiet: True }
+ tags: never,testpb,delete
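The final validation step is equivalent to calling the ONTAP REST API directly with the client certificate and key, which can also be checked by hand with the requests library; this is a hedged sketch where the host, endpoint choice and file paths are placeholders.

import requests

response = requests.get(
    'https://10.0.0.21/api/svm/svms',
    cert=('/path/to/test.pem', '/path/to/test.key'),
    verify=False,   # self-signed certificate
    timeout=60,
)
print(response.status_code, response.json())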
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml
new file mode 100644
index 00000000..9ec10865
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml
@@ -0,0 +1,46 @@
+-
+ name: Ontap Upgrade Firmware
+ hosts: localhost
+ gather_facts: no
+ collections:
+ - netapp.ontap
+ vars:
+ # TODO: change these variable values
+ ontap_firmware_url: TBD
+ ontap_admin_ip: TBD
+ ontap_admin_username: admin
+ ontap_admin_password: TBD
+ # we recommend using https, but it requires a valid SSL certificate
+ ontap_use_https: true
+ ontap_validate_certs: false
+ # DONE - do not change anything else
+
+ login: &login
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+
+ tasks:
+ - name: run ontap info module to check connectivity
+ na_ontap_info:
+ <<: *login
+ gather_subset: ontap_system_version
+ register: ontap
+ - debug: var=ontap
+
+ - name: run ontap command module to validate access permissions
+ na_ontap_command:
+ <<: *login
+ command: version
+ return_dict: false
+ register: ontap
+ - debug: var=ontap
+
+ - name: run ontap firmware download module
+ na_ontap_firmware_upgrade:
+ <<: *login
+ package_url: "{{ ontap_firmware_url }}"
+ register: ontap
+ - debug: var=ontap \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml
new file mode 100644
index 00000000..d55dec10
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml
@@ -0,0 +1,47 @@
+-
+ name: Ontap Upgrade Firmware
+ hosts: localhost
+ gather_facts: no
+ collections:
+ - netapp.ontap
+ vars:
+ # TODO: use --extra_vars to provide values for these variables
+ # ontap_firmware_url: TBD
+ # ontap_admin_ip: TBD
+ # ontap_admin_username: admin
+ # ontap_admin_password: TBD
+ # we recommend using https, but it requires a valid SSL certificate
+ # if these variables are defined in --extra_vars, the following values are ignored
+ ontap_use_https: true
+ ontap_validate_certs: false
+ # do not change anything else
+
+ login: &login
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+
+ tasks:
+ - name: run ontap info module to check connectivity
+ na_ontap_info:
+ <<: *login
+ gather_subset: ontap_system_version
+ register: ontap
+ - debug: var=ontap
+
+ - name: run ontap command module to validate access permissions
+ na_ontap_command:
+ <<: *login
+ command: version
+ return_dict: false
+ register: ontap
+ - debug: var=ontap
+
+ - name: run ontap firmware download module
+ na_ontap_firmware_upgrade:
+ <<: *login
+ package_url: "{{ ontap_firmware_url }}"
+ register: ontap
+ - debug: var=ontap \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml
new file mode 100644
index 00000000..d8a68c63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml
@@ -0,0 +1,45 @@
+-
+ name: Ontap Upgrade Firmware
+ hosts: localhost
+ gather_facts: no
+ collections:
+ - netapp.ontap
+ vars_files:
+ # TODO change this path as needed
+ - /tmp/ansible/ontap_vars_file.yml
+ vars:
+ # we recommend using https, but it requires a valid SSL certificate
+ # if these variables are defined in the vars file, the following values are ignored
+ ontap_use_https: true
+ ontap_validate_certs: false
+ # DONE - do not change anything else
+
+ login: &login
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: "{{ ontap_use_https }}"
+ validate_certs: "{{ ontap_validate_certs }}"
+
+ tasks:
+ - name: run ontap info module to check connectivity
+ na_ontap_info:
+ <<: *login
+ gather_subset: ontap_system_version
+ register: ontap
+ - debug: var=ontap
+
+ - name: run ontap command module to validate access permissions
+ na_ontap_command:
+ <<: *login
+ command: version
+ return_dict: false
+ register: ontap
+ - debug: var=ontap
+
+ - name: run ontap firmware download module
+ na_ontap_firmware_upgrade:
+ <<: *login
+ package_url: "{{ ontap_firmware_url }}"
+ register: ontap
+ - debug: var=ontap \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml
new file mode 100644
index 00000000..7675e295
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml
@@ -0,0 +1,27 @@
+# TODO: change these variable values
+ontap_admin_ip: TBD
+# either username/password credentials
+ontap_admin_username: admin
+ontap_admin_password: TBD
+# or SSL certificate authentication
+ontap_cert_filepath: "/home/TBD/test.pem"
+ontap_key_filepath: "/home/TBD/test.key"
+# we recommend using https, but it requires a valid SSL certificate
+ontap_use_https: true
+ontap_validate_certs: false
+# Optionally, SVM credentials
+ontap_svm_admin_ip: TBD
+ontap_svm_admin_username: vsadmin
+ontap_svm_admin_password: TBD
+# Optionally, to upgrade disk, shelf, acp firmware
+ontap_firmware_url: TBD
+# DONE - do not change anything else
+#
+# To use this file:
+# option 1: use ansible-playbook command line argument --extra-vars=@<path to this file>
+# for instance:
+# ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml --extra-vars=@/tmp/ansible/ontap_vars_file.yml
+# option 2: include this file in your playbook using vars_files:
+# for instance:
+# vars_files:
+# - <path to vars file>
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py
new file mode 100644
index 00000000..725d3248
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+
+ # Documentation fragment for ONTAP (na_ontap)
+ NA_ONTAP = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ - Two authentication methods are supported
+ - 1. basic authentication, using username and password,
+ - 2. SSL certificate authentication, using an SSL client cert file, and optionally a private key file.
+ - To use a certificate, the certificate must have been installed in the ONTAP cluster, and cert authentication must have been enabled.
+ type: str
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ aliases: [ pass ]
+ cert_filepath:
+ description:
+ - path to SSL client cert file (.pem).
+ - not supported with python 2.6.
+ type: str
+ version_added: 20.6.0
+ key_filepath:
+ description:
+ - path to SSL client key file.
+ type: str
+ version_added: 20.6.0
+ https:
+ description:
+ - Enable and disable https.
+ - Ignored when using REST as only https is supported.
+ - Ignored when using SSL certificate authentication as it requires SSL.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+ - The REST API is used if it is supported by the target system for all the resources and attributes the module requires; otherwise the module reverts to ZAPI.
+ - always -- will always use the REST API
+ - never -- will always use the ZAPI
+ - auto -- will try to use the REST API
+ default: auto
+ type: str
+ feature_flags:
+ description:
+ - Enable or disable a new feature.
+ - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility.
+ - Supported keys and values are subject to change without notice. Unknown keys are ignored.
+ type: dict
+ version_added: "20.5.0"
+
+
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward.
+ - REST support requires ONTAP 9.6 or later.
+ - Ansible 2.6
+ - Ansible 2.9 or later is strongly recommended as it enables the new collection delivery system.
+ - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
+ - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
+ - netapp-lib 2020.3.12 is strongly recommended as it provides better error reporting for connection issues.
+ - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
+
+notes:
+ - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
+
+'''
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py
new file mode 100644
index 00000000..73693f54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py
@@ -0,0 +1,745 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''
+netapp.py
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import ssl
+import time
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+COLLECTION_VERSION = "20.12.0"
+
+try:
+ from netapp_lib.api.zapi import zapi
+ HAS_NETAPP_LIB = True
+except ImportError:
+ HAS_NETAPP_LIB = False
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+POW2_BYTE_MAP = dict(
+ # Here, 1 kb = 1024
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+)
+
+ERROR_MSG = dict(
+ no_cserver='This module is expected to run as cluster admin'
+)
+
+try:
+ from solidfire.factory import ElementFactory
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+
+def has_netapp_lib():
+ return HAS_NETAPP_LIB
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def na_ontap_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=False, type='str', aliases=['user']),
+ password=dict(required=False, type='str', aliases=['pass'], no_log=True),
+ https=dict(required=False, type='bool', default=False),
+ validate_certs=dict(required=False, type='bool', default=True),
+ http_port=dict(required=False, type='int'),
+ ontapi=dict(required=False, type='int'),
+ use_rest=dict(required=False, type='str', default='auto'),
+ feature_flags=dict(required=False, type='dict', default=dict()),
+ cert_filepath=dict(required=False, type='str'),
+ key_filepath=dict(required=False, type='str'),
+ )
+
+
+def has_feature(module, feature_name):
+ feature = get_feature(module, feature_name)
+ if isinstance(feature, bool):
+ return feature
+ module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name)
+
+
+def get_feature(module, feature_name):
+ ''' if the user has configured the feature, use it
+ otherwise, use our default
+ '''
+ default_flags = dict(
+ check_required_params_for_none=True,
+ classic_basic_authorization=False, # use ZAPI wrapper to send Authorization header
+ deprecation_warning=True,
+ sanitize_xml=True,
+ sanitize_code_points=[8], # unicode values, 8 is backspace
+ show_modified=True
+ )
+
+ if module.params['feature_flags'] is not None and feature_name in module.params['feature_flags']:
+ return module.params['feature_flags'][feature_name]
+ if feature_name in default_flags:
+ return default_flags[feature_name]
+ module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name)
+
+
+def create_sf_connection(module, port=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_SF_SDK and hostname and username and password:
+ try:
+ return_val = ElementFactory.create(hostname, username, password, port=port)
+ return return_val
+ except Exception:
+ raise Exception("Unable to create SF connection")
+ else:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+
+def set_auth_method(module, username, password, cert_filepath, key_filepath):
+ error = None
+ if password is None and username is None:
+ if cert_filepath is None and key_filepath is not None:
+ error = 'Error: cannot have a key file without a cert file'
+ elif cert_filepath is None:
+ error = 'Error: ONTAP module requires username/password or SSL certificate file(s)'
+ elif key_filepath is None:
+ auth_method = 'single_cert'
+ else:
+ auth_method = 'cert_key'
+ elif password is not None and username is not None:
+ if cert_filepath is not None or key_filepath is not None:
+ error = 'Error: cannot have both basic authentication (username/password) ' +\
+ 'and certificate authentication (cert/key files)'
+ elif has_feature(module, 'classic_basic_authorization'):
+ auth_method = 'basic_auth'
+ else:
+ auth_method = 'speedy_basic_auth'
+ else:
+ error = 'Error: username and password have to be provided together'
+ if cert_filepath is not None or key_filepath is not None:
+ error += ' and cannot be used with cert or key files'
+ if error:
+ module.fail_json(msg=error)
+ return auth_method
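A minimal sketch of the resulting decision table, again with a hypothetical stub module (feature_flags left empty, so classic_basic_authorization defaults to False):

class _StubModule:
    params = {'feature_flags': {}}

    def fail_json(self, msg):
        raise SystemExit(msg)

assert set_auth_method(_StubModule(), None, None, '/tmp/test.pem', '/tmp/test.key') == 'cert_key'
assert set_auth_method(_StubModule(), None, None, '/tmp/test.pem', None) == 'single_cert'
assert set_auth_method(_StubModule(), 'admin', 'secret', None, None) == 'speedy_basic_auth'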
+
+
+def setup_na_ontap_zapi(module, vserver=None, wrap_zapi=False):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ https = module.params['https']
+ validate_certs = module.params['validate_certs']
+ port = module.params['http_port']
+ version = module.params['ontapi']
+ cert_filepath = module.params['cert_filepath']
+ key_filepath = module.params['key_filepath']
+ auth_method = set_auth_method(module, username, password, cert_filepath, key_filepath)
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ if auth_method in ('single_cert', 'cert_key'):
+ # override NaServer in netapp-lib to enable certificate authentication
+ server = OntapZAPICx(hostname, module=module, username=username, password=password,
+ validate_certs=validate_certs, cert_filepath=cert_filepath,
+ key_filepath=key_filepath, style=zapi.NaServer.STYLE_CERTIFICATE,
+ auth_method=auth_method)
+ # SSL certificate authentication requires SSL
+ https = True
+ elif auth_method == 'speedy_basic_auth' or wrap_zapi:
+ # override NaServer in netapp-lib to add Authorization header preemptively
+ # use wrapper to handle parse error (mostly for na_ontap_command)
+ server = OntapZAPICx(hostname, module=module, username=username, password=password,
+ validate_certs=validate_certs, auth_method=auth_method)
+ else:
+ # legacy netapp-lib
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ if version:
+ minor = version
+ else:
+ minor = 110
+ server.set_api_version(major=1, minor=minor)
+ # default is HTTP
+ if https:
+ if port is None:
+ port = 443
+ transport_type = 'HTTPS'
+ # HACK to bypass certificate verification
+ if validate_certs is False:
+ if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ else:
+ if port is None:
+ port = 80
+ transport_type = 'HTTP'
+ server.set_transport_type(transport_type)
+ server.set_port(port)
+ server.set_server_type('FILER')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def is_zapi_connection_error(message):
+ ''' return True if it is a connection issue '''
+ # netapp-lib message may contain a tuple or a str!
+ if isinstance(message, tuple) and isinstance(message[0], ConnectionError):
+ return True
+ if isinstance(message, str) and message.startswith(('URLError', 'Unauthorized')):
+ return True
+ return False
+
+
+def is_zapi_write_access_error(message):
+ ''' return True if it is a write access issue '''
+ # netapp-lib message may contain a tuple or a str!
+ if isinstance(message, str) and message.startswith('Insufficient privileges:'):
+ return 'does not have write access' in message
+ return False
+
+
+def ems_log_event(source, server, name="Ansible", ident="12345", version=COLLECTION_VERSION,
+ category="Information", event="setup", autosupport="false"):
+ ems_log = zapi.NaElement('ems-autosupport-log')
+ # Host name invoking the API.
+ ems_log.add_new_child("computer-name", name)
+ # ID of event. A user defined event-id, range [0..2^32-2].
+ ems_log.add_new_child("event-id", ident)
+ # Name of the application invoking the API.
+ ems_log.add_new_child("event-source", source)
+ # Version of application invoking the API.
+ ems_log.add_new_child("app-version", version)
+ # Application defined category of the event.
+ ems_log.add_new_child("category", category)
+ # Description of event to log. An application defined message to log.
+ ems_log.add_new_child("event-description", event)
+ ems_log.add_new_child("log-level", "6")
+ ems_log.add_new_child("auto-support", autosupport)
+ try:
+ server.invoke_successfully(ems_log, True)
+ except zapi.NaApiError as exc:
+ # Do not fail if we can't connect to the server.
+ # The module will report a better error when trying to get some data from ONTAP.
+ # Do not fail if we don't have write privileges.
+ if not is_zapi_connection_error(exc.message) and not is_zapi_write_access_error(exc.message):
+ # raise on other errors, as it may be a bug in calling the ZAPI
+ raise exc
+
+
+def get_cserver_zapi(server):
+ ''' returns None if not run on the management or cluster IP '''
+ vserver_info = zapi.NaElement('vserver-get-iter')
+ query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
+ query = zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+ try:
+ result = server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+ except zapi.NaApiError as exc:
+ # Do not fail if we can't connect to the server.
+ # The module will report a better error when trying to get some data from ONTAP.
+ if is_zapi_connection_error(exc.message):
+ return None
+ # raise on other errors, as it may be a bug in calling the ZAPI
+ raise exc
+ attribute_list = result.get_child_by_name('attributes-list')
+ if attribute_list is not None:
+ vserver_list = attribute_list.get_child_by_name('vserver-info')
+ if vserver_list is not None:
+ return vserver_list.get_child_content('vserver-name')
+ return None
+
+
+def classify_zapi_exception(error):
+ ''' return type of error '''
+ try:
+ # very unlikely to fail, but don't take any chance
+ err_code = int(error.code)
+ except (AttributeError, ValueError):
+ err_code = 0
+ try:
+ # very unlikely to fail, but don't take any chance
+ err_msg = error.message
+ except AttributeError:
+ err_msg = ""
+ if err_code == 13005 and err_msg.startswith('Unable to find API:') and 'data vserver' in err_msg:
+ return 'missing_vserver_api_error', 'Most likely running a cluster level API as vserver: %s' % to_native(error)
+ if err_code == 13001 and err_msg.startswith("RPC: Couldn't make connection"):
+ return 'rpc_error', to_native(error)
+ return "other_error", to_native(error)
+
+
+def get_cserver(connection, is_rest=False):
+ if not is_rest:
+ return get_cserver_zapi(connection)
+
+ params = {'fields': 'type'}
+ api = "private/cli/vserver"
+ json, error = connection.get(api, params)
+ if json is None or error is not None:
+ # exit if there is an error or no data
+ return None
+ vservers = json.get('records')
+ if vservers is not None:
+ for vserver in vservers:
+ if vserver['type'] == 'admin': # cluster admin
+ return vserver['vserver']
+ if len(vservers) == 1: # assume vserver admin
+ return vservers[0]['vserver']
+
+ return None
+
+
+if HAS_NETAPP_LIB:
+ class OntapZAPICx(zapi.NaServer):
+ ''' override zapi NaServer class to:
+ - enable SSL certificate authentication
+ - ignore invalid XML characters in ONTAP output (when using CLI module)
+ - add Authorization header when using basic authentication
+ '''
+ def __init__(self, hostname=None, server_type=zapi.NaServer.SERVER_TYPE_FILER,
+ transport_type=zapi.NaServer.TRANSPORT_TYPE_HTTP,
+ style=zapi.NaServer.STYLE_LOGIN_PASSWORD, username=None,
+ password=None, port=None, trace=False, module=None,
+ cert_filepath=None, key_filepath=None, validate_certs=None,
+ auth_method=None):
+ # python 2.x syntax, but works for python 3 as well
+ super(OntapZAPICx, self).__init__(hostname, server_type=server_type,
+ transport_type=transport_type,
+ style=style, username=username,
+ password=password, port=port, trace=trace)
+ self.cert_filepath = cert_filepath
+ self.key_filepath = key_filepath
+ self.validate_certs = validate_certs
+ self.module = module
+ self.base64_creds = None
+ if auth_method == 'speedy_basic_auth':
+ auth = '%s:%s' % (username, password)
+ self.base64_creds = base64.b64encode(auth.encode()).decode()
+
+ def _create_certificate_auth_handler(self):
+ try:
+ context = ssl.create_default_context()
+ except AttributeError as exc:
+ msg = 'SSL certificate authentication requires python 2.7 or later.'
+ msg += ' More info: %s' % repr(exc)
+ self.module.fail_json(msg=msg)
+ if not self.validate_certs:
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+ try:
+ context.load_cert_chain(self.cert_filepath, keyfile=self.key_filepath)
+ except IOError as exc: # python 2.7 does not have FileNotFoundError
+ msg = 'Cannot load SSL certificate, check files exist.'
+ msg += ' More info: %s' % repr(exc)
+ self.module.fail_json(msg=msg)
+ return zapi.urllib.request.HTTPSHandler(context=context)
+
+ def _parse_response(self, response):
+ ''' handling XML parsing exception '''
+ try:
+ return super(OntapZAPICx, self)._parse_response(response)
+ except zapi.etree.XMLSyntaxError as exc:
+ if has_feature(self.module, 'sanitize_xml'):
+ # some ONTAP CLI commands return BEL on error
+ new_response = response.replace(b'\x07\n', b'')
+ # And 9.1 uses \r\n rather than \n !
+ new_response = new_response.replace(b'\x07\r\n', b'')
+ # And 9.7 may send backspaces
+ for code_point in get_feature(self.module, 'sanitize_code_points'):
+ if bytes([8]) == b'\x08': # python 3
+ byte = bytes([code_point])
+ elif chr(8) == b'\x08': # python 2
+ byte = chr(code_point)
+ else: # very unlikely, noop
+ byte = b'.'
+ new_response = new_response.replace(byte, b'.')
+ try:
+ return super(OntapZAPICx, self)._parse_response(new_response)
+ except Exception:
+ # ignore a second exception, we'll report the first one
+ pass
+ try:
+ # report first exception, but include full response
+ exc.msg += ". Received: %s" % response
+ except Exception:
+ # in case the response is very badly formatted, ignore it
+ pass
+ raise exc
+
+ def _create_request(self, na_element, enable_tunneling=False):
+ ''' intercept newly created request to add Authorization header '''
+ request, netapp_element = super(OntapZAPICx, self)._create_request(na_element, enable_tunneling=enable_tunneling)
+ if self.base64_creds is not None:
+ request.add_header("Authorization", "Basic %s" % self.base64_creds)
+ return request, netapp_element
+
+
+class OntapRestAPI(object):
+ ''' wrapper to send requests to ONTAP REST APIs '''
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.hostname = self.module.params['hostname']
+ self.use_rest = self.module.params['use_rest'].lower()
+ self.cert_filepath = self.module.params['cert_filepath']
+ self.key_filepath = self.module.params['key_filepath']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ port = self.module.params['http_port']
+ if port is None:
+ self.url = 'https://' + self.hostname + '/api/'
+ else:
+ self.url = 'https://%s:%d/api/' % (self.hostname, port)
+ self.is_rest_error = None
+ self.ontap_version = dict(
+ full='unknown',
+ generation=-1,
+ major=-1,
+ minor=-1,
+ valid=False
+ )
+ self.errors = list()
+ self.debug_logs = list()
+ self.auth_method = set_auth_method(self.module, self.username, self.password, self.cert_filepath, self.key_filepath)
+ self.check_required_library()
+
+ def requires_ontap_9_6(self, module_name):
+ return self.requires_ontap_version(module_name)
+
+ def requires_ontap_version(self, module_name, version='9.6'):
+ suffix = " - %s" % self.is_rest_error if self.is_rest_error is not None else ""
+ return "%s only support REST, and requires ONTAP %s or later.%s" % (module_name, version, suffix)
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None, accept=None,
+ vserver_name=None, vserver_uuid=None):
+ ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+ headers = None
+ if accept is not None or vserver_name is not None or vserver_uuid is not None:
+ headers = dict()
+ # accept is used to turn on/off HAL linking
+ if accept is not None:
+ headers['accept'] = accept
+ # vserver tunneling using vserver name and/or UUID
+ if vserver_name is not None:
+ headers['X-Dot-SVM-Name'] = vserver_name
+ if vserver_uuid is not None:
+ headers['X-Dot-SVM-UUID'] = vserver_uuid
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+ except ValueError:
+ return None, None
+ error = json.get('error')
+ return json, error
+
+ if self.auth_method == 'single_cert':
+ kwargs = dict(cert=self.cert_filepath)
+ elif self.auth_method == 'cert_key':
+ kwargs = dict(cert=(self.cert_filepath, self.key_filepath))
+ elif self.auth_method in ('basic_auth', 'speedy_basic_auth'):
+ # with requests, there is no challenge, eg no 401.
+ kwargs = dict(auth=(self.username, self.password))
+ else:
+ raise KeyError(self.auth_method)
+
+ try:
+ response = requests.request(method, url, verify=self.verify, params=params,
+ timeout=self.timeout, json=json, headers=headers, **kwargs)
+ content = response.content # for debug purposes
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ response.raise_for_status()
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ self.log_error(status_code, 'HTTP error: %s' % err)
+ error_details = str(err)
+ # If an error was reported in the json payload, it is handled below
+ except requests.exceptions.ConnectionError as err:
+ self.log_error(status_code, 'Connection error: %s' % err)
+ error_details = str(err)
+ except Exception as err:
+ self.log_error(status_code, 'Other error: %s' % err)
+ error_details = str(err)
+ if json_error is not None:
+ self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
+ error_details = json_error
+ self.log_debug(status_code, content)
+ if not json_dict and method == 'OPTIONS':
+ # OPTIONS provides the list of supported verbs (json_dict may be None or empty here)
+ json_dict = {'Allow': response.headers['Allow']}
+ return status_code, json_dict, error_details
+
+ def wait_on_job(self, job, timeout=600, increment=60):
+ try:
+ url = job['_links']['self']['href'].split('api/')[1]
+ except Exception as err:
+ self.log_error(0, 'URL Incorrect format: %s\n Job: %s' % (err, job))
+ # Expecting job to be in the following format
+ # {'job':
+ # {'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7',
+ # '_links':
+ # {'self':
+ # {'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}
+ # }
+ # }
+ # }
+ keep_running = True
+ error = None
+ message = None
+ runtime = 0
+ retries = 0
+ max_retries = 3
+ while keep_running:
+ # Will run every <increment> seconds for <timeout> seconds
+ job_json, job_error = self.get(url, None)
+ if job_error:
+ error = job_error
+ retries += 1
+ if retries > max_retries:
+ self.log_error(0, 'Job error: Reach max retries.')
+ break
+ else:
+ retries = 0
+ # a job looks like this
+ # {
+ # "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14",
+ # "description": "POST /api/cluster/metrocluster",
+ # "state": "failure",
+ # "message": "There are not enough disks in Pool1.", **OPTIONAL**
+ # "code": 2432836,
+ # "start_time": "2020-02-26T10:35:44-08:00",
+ # "end_time": "2020-02-26T10:47:38-08:00",
+ # "_links": {
+ # "self": {
+ # "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14"
+ # }
+ # }
+ # }
+
+ message = job_json.get('message', '')
+ if job_json['state'] == 'failure':
+ # if the job has failed, return the message as the error
+ return None, message
+ if job_json['state'] != 'running':
+ keep_running = False
+ else:
+ # Would like to post a message to user (not sure how)
+ if runtime >= timeout:
+ keep_running = False
+ if job_json['state'] != 'success':
+ self.log_error(0, 'Timeout error: Process still running')
+ if keep_running:
+ time.sleep(increment)
+ runtime += increment
+ return message, error
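A hedged usage sketch: after an asynchronous POST whose response contains a 'job' entry, wait_on_job polls the job link until it leaves the 'running' state. Here rest_api is an OntapRestAPI instance and module the AnsibleModule; the endpoint and body are illustrative placeholders.

body = {'name': 'vol_example', 'svm': {'name': 'svm1'}}   # placeholder payload
message, error = rest_api.post('storage/volumes', body)
if error is None and message and 'job' in message:
    job_message, job_error = rest_api.wait_on_job(message['job'], timeout=300, increment=30)
    if job_error is not None:
        module.fail_json(msg='job failed: %s' % job_error)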
+
+ def get(self, api, params=None):
+ method = 'GET'
+ dummy, message, error = self.send_request(method, api, params)
+ return message, error
+
+ def post(self, api, body, params=None):
+ method = 'POST'
+ dummy, message, error = self.send_request(method, api, params, json=body)
+ return message, error
+
+ def patch(self, api, body, params=None):
+ method = 'PATCH'
+ dummy, message, error = self.send_request(method, api, params, json=body)
+ return message, error
+
+ def delete(self, api, body=None, params=None):
+ method = 'DELETE'
+ dummy, message, error = self.send_request(method, api, params, json=body)
+ return message, error
+
+ def options(self, api, params=None):
+ method = 'OPTIONS'
+ dummy, message, error = self.send_request(method, api, params)
+ return message, error
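A hedged example of the thin HTTP wrappers above (rest_api is an OntapRestAPI instance, module the AnsibleModule; API paths are relative to /api/):

records, error = rest_api.get('svm/svms', params={'fields': 'name,state'})
if error:
    module.fail_json(msg='Error listing SVMs: %s' % error)
for record in records.get('records', []):
    print(record['name'], record.get('state'))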
+
+ def set_version(self, message):
+ try:
+ version = message.get('version', 'not found')
+ except AttributeError:
+ self.ontap_version['full'] = 'unreadable message'
+ return
+ for key in self.ontap_version:
+ try:
+ self.ontap_version[key] = version.get(key, -1)
+ except AttributeError:
+ self.ontap_version[key] = 'unreadable version'
+ self.ontap_version['valid'] = True
+ for key in self.ontap_version:
+ if self.ontap_version[key] == -1:
+ self.ontap_version['valid'] = False
+ break
+
+ def _is_rest(self, used_unsupported_rest_properties=None):
+ if self.use_rest not in ['always', 'auto', 'never']:
+ error = "use_rest must be one of: never, always, auto. Got: '%s'" % self.use_rest
+ return False, error
+ if self.use_rest == "always":
+ if used_unsupported_rest_properties:
+ error = "REST API currently does not support '%s'" % \
+ ', '.join(used_unsupported_rest_properties)
+ return True, error
+ else:
+ return True, None
+ if self.use_rest == 'never' or used_unsupported_rest_properties:
+ # force ZAPI if requested or if some parameter requires it
+ return False, None
+ # using GET rather than HEAD because the error messages are different
+ method = 'GET'
+ api = 'cluster'
+ params = {'fields': ['version']}
+ status_code, message, error = self.send_request(method, api, params=params)
+ self.set_version(message)
+ self.is_rest_error = str(error) if error else None
+ if status_code == 200:
+ return True, None
+ self.log_error(status_code, str(error))
+ return False, None
+
+ def is_rest(self, used_unsupported_rest_properties=None):
+ ''' only return error if there is a reason to '''
+ use_rest, error = self._is_rest(used_unsupported_rest_properties)
+ if used_unsupported_rest_properties is None:
+ return use_rest
+ return use_rest, error
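A hedged sketch of the usual start-up pattern in a module: try REST first and fall back to ZAPI when is_rest() returns False (module is the AnsibleModule created by the caller):

import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils

rest_api = netapp_utils.OntapRestAPI(module)
use_rest = rest_api.is_rest()
if not use_rest:
    server = netapp_utils.setup_na_ontap_zapi(module)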
+
+ def log_error(self, status_code, message):
+ self.errors.append(message)
+ self.debug_logs.append((status_code, message))
+
+ def log_debug(self, status_code, content):
+ self.debug_logs.append((status_code, content))
+
+ def write_to_file(self, tag, data=None, filepath=None, append=True):
+ '''
+ This function is only for debug purposes, all calls to write_to_file should be removed
+ before submitting.
+ If data is None, tag is considered as data
+ else tag is a label, and data is data.
+ '''
+ if filepath is None:
+ filepath = '/tmp/ontap_log'
+ if append:
+ mode = 'a'
+ else:
+ mode = 'w'
+ with open(filepath, mode) as afile:
+ if data is not None:
+ afile.write("%s: %s\n" % (str(tag), str(data)))
+ else:
+ afile.write(str(tag))
+ afile.write('\n')
+
+ def write_errors_to_file(self, tag=None, filepath=None, append=True):
+ if tag is None:
+ tag = 'Error'
+ for error in self.errors:
+ self.write_to_file(tag, error, filepath, append)
+ if not append:
+ append = True
+
+ def write_debug_log_to_file(self, tag=None, filepath=None, append=True):
+ if tag is None:
+ tag = 'Debug'
+ for status_code, message in self.debug_logs:
+ self.write_to_file(tag, status_code, filepath, append)
+ if not append:
+ append = True
+ self.write_to_file(tag, message, filepath, append)
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py
new file mode 100644
index 00000000..b7331d87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py
@@ -0,0 +1,159 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_SF_SDK = False
+try:
+ import solidfire.common
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+class NaElementSWModule(object):
+
+ def __init__(self, elem):
+ self.elem_connect = elem
+ self.parameters = dict()
+
+ def get_volume(self, volume_id):
+ """
+ Return volume details if volume exists for given volume_id
+
+ :param volume_id: volume ID
+ :type volume_id: int
+ :return: Volume dict if found, None if not found
+ :rtype: dict
+ """
+ volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id])
+ for volume in volume_list.volumes:
+ if volume.volume_id == volume_id:
+ if str(volume.delete_time) == "":
+ return volume
+ return None
+
+ def get_volume_id(self, vol_name, account_id):
+ """
+ Return volume id from the given (valid) account_id if found
+ Return None if not found
+
+ :param vol_name: Name of the volume
+ :type vol_name: str
+ :param account_id: Account ID
+ :type account_id: int
+
+ :return: Volume ID of the first matching volume if found. None if not found.
+ :rtype: int
+ """
+ volume_list = self.elem_connect.list_volumes_for_account(account_id=account_id)
+ for volume in volume_list.volumes:
+ if volume.name == vol_name:
+ # return volume_id
+ if str(volume.delete_time) == "":
+ return volume.volume_id
+ return None
+
+ def volume_id_exists(self, volume_id):
+ """
+ Return volume_id if volume exists for given volume_id
+
+ :param volume_id: volume ID
+ :type volume_id: int
+ :return: Volume ID if found, None if not found
+ :rtype: int
+ """
+ volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id])
+ for volume in volume_list.volumes:
+ if volume.volume_id == volume_id:
+ if str(volume.delete_time) == "":
+ return volume.volume_id
+ return None
+
+ def volume_exists(self, volume, account_id):
+ """
+ Return volume_id if exists, None if not found
+
+ :param volume: Volume ID or Name
+ :type volume: str
+ :param account_id: Account ID (valid)
+ :type account_id: int
+ :return: Volume ID if found, None if not found
+ """
+ # If volume is an integer, get_by_id
+ if str(volume).isdigit():
+ volume_id = int(volume)
+ try:
+ if self.volume_id_exists(volume_id):
+ return volume_id
+ except solidfire.common.ApiServerError:
+ # don't fail, continue and try get_by_name
+ pass
+ # get volume by name
+ volume_id = self.get_volume_id(volume, account_id)
+ return volume_id
+
+ def get_snapshot(self, snapshot_id, volume_id):
+ """
+ Return snapshot details if found
+
+ :param snapshot_id: Snapshot ID or Name
+ :type snapshot_id: str
+ :param volume_id: Account ID (valid)
+ :type volume_id: int
+ :return: Snapshot dict if found, None if not found
+ :rtype: dict
+ """
+ # mandate src_volume_id although not needed by sdk
+ snapshot_list = self.elem_connect.list_snapshots(
+ volume_id=volume_id)
+ for snapshot in snapshot_list.snapshots:
+ # if actual id is provided
+ if str(snapshot_id).isdigit() and snapshot.snapshot_id == int(snapshot_id):
+ return snapshot
+ # if snapshot name is provided
+ elif snapshot.name == snapshot_id:
+ return snapshot
+ return None
+
+ def account_exists(self, account):
+ """
+ Return account_id if account exists for given account id or name
+ Raises an exception if account does not exist
+
+ :param account: Account ID or Name
+ :type account: str
+ :return: Account ID if found, None if not found
+ """
+ # If account is an integer, get_by_id
+ if account.isdigit():
+ account_id = int(account)
+ try:
+ result = self.elem_connect.get_account_by_id(account_id=account_id)
+ if result.account.account_id == account_id:
+ return account_id
+ except solidfire.common.ApiServerError:
+ # don't fail, continue and try get_by_name
+ pass
+ # get account by name, the method returns an Exception if account doesn't exist
+ result = self.elem_connect.get_account_by_name(username=account)
+ return result.account.account_id
+
+ def set_element_attributes(self, source):
+ """
+ Return telemetry attributes for the current execution
+
+ :param source: name of the module
+ :type source: str
+ :return: a dict containing telemetry attributes
+ """
+ attributes = {}
+ attributes['config-mgmt'] = 'ansible'
+ attributes['event-source'] = source
+ return attributes
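A hedged usage sketch of this helper class (sfe would be a SolidFire connection created with create_sf_connection(); names and IDs are placeholders):

helper = NaElementSWModule(sfe)
volume_id = helper.volume_exists('my_volume', account_id=1)
if volume_id is None:
    module.fail_json(msg='volume not found')
attributes = helper.set_element_attributes(source='na_elementsw_volume')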
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py
new file mode 100644
index 00000000..056e85e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py
@@ -0,0 +1,392 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+import re
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+def cmp(obj1, obj2):
+ """
+ Python 3 does not have a built-in cmp function; this provides an equivalent.
+ :param obj1: first object to check
+ :param obj2: second object to check
+ :return: -1, 0 or 1, following the usual cmp convention (string comparisons are case-insensitive)
+ """
+ # convert to lower case for string comparison.
+ if obj1 is None:
+ return -1
+ if isinstance(obj1, str) and isinstance(obj2, str):
+ obj1 = obj1.lower()
+ obj2 = obj2.lower()
+ # if list has string element, convert string to lower case.
+ if isinstance(obj1, list) and isinstance(obj2, list):
+ obj1 = [x.lower() if isinstance(x, str) else x for x in obj1]
+ obj2 = [x.lower() if isinstance(x, str) else x for x in obj2]
+ obj1.sort()
+ obj2.sort()
+ return (obj1 > obj2) - (obj1 < obj2)
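
A few concrete calls against the cmp() helper defined above; note that strings are compared case-insensitively and lists are lower-cased and sorted before being compared.

    assert cmp('ONTAP', 'ontap') == 0         # strings are lower-cased first
    assert cmp(['b', 'A'], ['a', 'B']) == 0   # list items lower-cased, then both lists sorted
    assert cmp(None, 'anything') == -1        # None always sorts first
    assert cmp(2, 10) == -1                   # other types fall back to normal ordering
    assert cmp('b', 'a') == 1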
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
+ def __init__(self):
+ self.log = list()
+ self.changed = False
+ self.parameters = {'name': 'not initialized'}
+ self.zapi_string_keys = dict()
+ self.zapi_bool_keys = dict()
+ self.zapi_list_keys = dict()
+ self.zapi_int_keys = dict()
+ self.zapi_required = dict()
+
+ def set_parameters(self, ansible_params):
+ self.parameters = dict()
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def check_and_set_parameters(self, module):
+ self.parameters = dict()
+ check_for_none = netapp_utils.has_feature(module, 'check_required_params_for_none')
+ if check_for_none:
+ required_keys = [key for key, value in module.argument_spec.items() if value.get('required')]
+ for param in module.params:
+ if module.params[param] is not None:
+ self.parameters[param] = module.params[param]
+ elif check_for_none and param in required_keys:
+ module.fail_json(msg="%s requires a value, got: None" % param)
+ return self.parameters
+
+ @staticmethod
+ def type_error_message(type_str, key, value):
+ return "expecting '%s' type for %s: %s, got: %s" % (type_str, repr(key), repr(value), type(value))
+
+ def get_value_for_bool(self, from_zapi, value, key=None):
+ """
+ Convert boolean values to string or vice-versa
+ If from_zapi = True, value is converted from string (as it appears in ZAPI) to boolean
+ If from_zapi = False, value is converted from boolean to string
+ For get() method, from_zapi = True
+ For modify(), create(), from_zapi = False
+ :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
+ :param value: value of the boolean attribute
+ :param key: if present, force error checking to validate type, and accepted values
+ :return: string or boolean
+ """
+ if value is None:
+ return None
+ if from_zapi:
+ if key is not None and not isinstance(value, str):
+ raise TypeError(self.type_error_message('str', key, value))
+ if key is not None and value not in ('true', 'false'):
+ raise ValueError('Unexpected value: %s received from ZAPI for boolean attribute: %s' % (repr(value), repr(key)))
+ return value == 'true'
+ if key is not None and not isinstance(value, bool):
+ raise TypeError(self.type_error_message('bool', key, value))
+ return 'true' if value else 'false'
+
+ def get_value_for_int(self, from_zapi, value, key=None):
+ """
+ Convert integer values to string or vice-versa
+ If from_zapi = True, value is converted from string (as it appears in ZAPI) to integer
+ If from_zapi = False, value is converted from integer to string
+ For get() method, from_zapi = True
+ For modify(), create(), from_zapi = False
+ :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
+ :param value: value of the integer attribute
+ :param key: if present, force error checking to validate type
+ :return: string or integer
+ """
+ if value is None:
+ return None
+ if from_zapi:
+ if key is not None and not isinstance(value, str):
+ raise TypeError(self.type_error_message('str', key, value))
+ return int(value)
+ if key is not None and not isinstance(value, int):
+ raise TypeError(self.type_error_message('int', key, value))
+ return str(value)
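
A quick sketch of the two scalar converters above, assuming an instance na_helper = NetAppModule(): ZAPI transports everything as strings, so get() paths convert with from_zapi=True and create()/modify() paths with from_zapi=False.

    na_helper = NetAppModule()

    # ZAPI -> Python, when reading a resource back
    assert na_helper.get_value_for_bool(from_zapi=True, value='true') is True
    assert na_helper.get_value_for_int(from_zapi=True, value='4096') == 4096

    # Python -> ZAPI, when building a create/modify request
    assert na_helper.get_value_for_bool(from_zapi=False, value=False) == 'false'
    assert na_helper.get_value_for_int(from_zapi=False, value=4096) == '4096'

    # passing key= enables strict type/value checking and raises on bad input
    assert na_helper.get_value_for_bool(from_zapi=True, value='true', key='is-online') is True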
+
+ def get_value_for_list(self, from_zapi, zapi_parent, zapi_child=None, data=None):
+ """
+ Convert a python list() to NaElement or vice-versa
+ If from_zapi = True, value is converted from NaElement (parent-children structure) to list()
+ If from_zapi = False, value is converted from list() to NaElement
+ :param zapi_parent: ZAPI parent key or the ZAPI parent NaElement
+ :param zapi_child: ZAPI child key
+ :param data: list() to be converted to NaElement parent-children object
+ :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
+ :return: list() or NaElement
+ """
+ if from_zapi:
+ if zapi_parent is None:
+ return []
+ return [zapi_child.get_content() for zapi_child in zapi_parent.get_children()]
+
+ zapi_parent = netapp_utils.zapi.NaElement(zapi_parent)
+ for item in data:
+ zapi_parent.add_new_child(zapi_child, item)
+ return zapi_parent
+
+ def get_cd_action(self, current, desired):
+ ''' takes a current state and a desired state, and returns an action:
+ 'create', 'delete', or None
+ eg:
+ some_object = self.get_object(name) # None if the object does not exist
+ action = self.na_helper.get_cd_action(current=some_object, desired=self.parameters)
+ '''
+ if 'state' in desired:
+ desired_state = desired['state']
+ else:
+ desired_state = 'present'
+
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ if current is not None:
+ return 'delete'
+ return 'create'
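
A minimal sketch of the create/delete decision above, assuming na_helper = NetAppModule() and that the module's get_...() call returned either a dict or None:

    na_helper = NetAppModule()

    assert na_helper.get_cd_action(None, {'state': 'present'}) == 'create'              # absent, wanted
    assert na_helper.get_cd_action({'name': 'aggr1'}, {'state': 'absent'}) == 'delete'  # present, unwanted
    assert na_helper.get_cd_action({'name': 'aggr1'}, {'state': 'present'}) is None     # already as desired
    assert na_helper.get_cd_action(None, {'state': 'absent'}) is None
    assert na_helper.changed is True   # set by the create/delete cases above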
+
+ def compare_and_update_values(self, current, desired, keys_to_compare):
+ updated_values = dict()
+ is_changed = False
+ for key in keys_to_compare:
+ if key in current:
+ if key in desired and desired[key] is not None:
+ if current[key] != desired[key]:
+ updated_values[key] = desired[key]
+ is_changed = True
+ else:
+ updated_values[key] = current[key]
+ else:
+ updated_values[key] = current[key]
+
+ return updated_values, is_changed
+
+ @staticmethod
+ def check_keys(current, desired):
+ ''' TODO: raise an error if keys do not match
+ with the exception of:
+ new_name, state in desired
+ '''
+
+ @staticmethod
+ def compare_lists(current, desired, get_list_diff):
+ ''' compares two lists and returns a list of elements that are either the desired elements or elements that are
+ modified from the current state, depending on the get_list_diff flag
+ :param: current: current item attribute in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: list of attributes to be modified
+ :rtype: list
+ '''
+ current_copy = deepcopy(current)
+ desired_copy = deepcopy(desired)
+
+ # get what is in desired but not in current
+ desired_diff_list = list()
+ for item in desired:
+ if item in current_copy:
+ current_copy.remove(item)
+ else:
+ desired_diff_list.append(item)
+
+ # get what is in current but not in desired
+ current_diff_list = list()
+ for item in current:
+ if item in desired_copy:
+ desired_copy.remove(item)
+ else:
+ current_diff_list.append(item)
+
+ if desired_diff_list or current_diff_list:
+ # there are changes
+ if get_list_diff:
+ return desired_diff_list
+ else:
+ return desired
+ else:
+ return None
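
For example, the effect of the get_list_diff flag on two lists that differ (the protocol names are illustrative):

    current = ['nfs', 'cifs']
    desired = ['cifs', 'iscsi']

    # full desired list, for attributes that are replaced wholesale
    assert NetAppModule.compare_lists(current, desired, get_list_diff=False) == ['cifs', 'iscsi']
    # only the additions, for attributes that are amended
    assert NetAppModule.compare_lists(current, desired, get_list_diff=True) == ['iscsi']
    # identical lists mean no change
    assert NetAppModule.compare_lists(['nfs'], ['nfs'], get_list_diff=True) is None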
+
+ def get_modified_attributes(self, current, desired, get_list_diff=False):
+ ''' takes two dicts of attributes and returns a dict of the desired attributes that
+ differ from the current state
+ It is expected that all attributes of interest are listed in current and
+ desired.
+ :param: current: current attributes in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: dict of attributes to be modified
+ :rtype: dict
+
+ NOTE: depending on the attribute, the caller may need to do a modify or a
+ different operation (eg move volume if the modified attribute is an
+ aggregate name)
+ '''
+ # if the object does not exist, we can't modify it
+ modified = dict()
+ if current is None:
+ return modified
+
+ # error out if keys do not match
+ self.check_keys(current, desired)
+
+ # collect changed attributes
+ for key, value in current.items():
+ if key in desired and desired[key] is not None:
+ if isinstance(value, list):
+ modified_list = self.compare_lists(value, desired[key], get_list_diff) # get modified list from current and desired
+ if modified_list is not None:
+ modified[key] = modified_list
+ else:
+ try:
+ result = cmp(value, desired[key])
+ except TypeError as exc:
+ raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
+ else:
+ if result != 0:
+ modified[key] = desired[key]
+ if modified:
+ self.changed = True
+ return modified
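
Putting the pieces together, a sketch of a typical modify check; na_helper = NetAppModule() is assumed and the attribute names are illustrative:

    na_helper = NetAppModule()
    current = {'comment': 'old comment', 'size': 100, 'protocols': ['nfs']}
    desired = {'comment': 'new comment', 'size': 100, 'protocols': ['nfs', 'cifs']}

    modified = na_helper.get_modified_attributes(current, desired)
    assert modified == {'comment': 'new comment', 'protocols': ['nfs', 'cifs']}
    assert na_helper.changed is True

    # string comparison is case-insensitive, so this is not reported as a change
    assert na_helper.get_modified_attributes({'style': 'UNIX'}, {'style': 'unix'}) == {}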
+
+ def is_rename_action(self, source, target):
+ ''' takes a source and target object, and returns True
+ if a rename is required
+ eg:
+ source = self.get_object(source_name)
+ target = self.get_object(target_name)
+ action = is_rename_action(source, target)
+ :return: None for error, True for rename action, False otherwise
+ '''
+ if source is None and target is None:
+ # error, do nothing
+ # cannot rename a non-existent resource
+ # alternatively we could create B
+ return None
+ if source is not None and target is not None:
+ # error, do nothing
+ # idempotency (or) new_name_is_already_in_use
+ # alternatively we could delete B and rename A to B
+ return False
+ if source is None and target is not None:
+ # do nothing, maybe the rename was already done
+ return False
+ # source is not None and target is None:
+ # rename is in order
+ self.changed = True
+ return True
+
+ @staticmethod
+ def sanitize_wwn(initiator):
+ ''' igroup initiator may or may not be using WWN format: eg 20:00:00:25:B5:00:20:01
+ if format is matched, convert initiator to lowercase, as this is what ONTAP is using '''
+ wwn_format = r'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}'
+ initiator = initiator.strip()
+ if re.match(wwn_format, initiator):
+ initiator = initiator.lower()
+ return initiator
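
For instance (the initiator values are made up):

    # WWN-formatted initiators are normalised to lower case, anything else passes through untouched
    assert NetAppModule.sanitize_wwn(' 20:00:00:25:B5:00:20:01 ') == '20:00:00:25:b5:00:20:01'
    assert NetAppModule.sanitize_wwn('iqn.1998-01.com.example:host1') == 'iqn.1998-01.com.example:host1'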
+
+ def safe_get(self, an_object, key_list, allow_sparse_dict=True):
+ ''' recursively traverse a dictionary or any object supporting __getitem__
+ (in our case, python dicts and NAElement responses)
+ It is expected that some keys can be missing, this is controlled with allow_sparse_dict
+
+ return value if the key chain is exhausted
+ return None if a key is not found and allow_sparse_dict is True
+ raise KeyError if a key is not found and allow_sparse_dict is False (looking for exact match)
+ raise TypeError if an intermediate element cannot be indexed,
+ unless the element is None and allow_sparse_dict is True
+ '''
+ if not key_list:
+ # we've exhausted the keys, good!
+ return an_object
+ key = key_list.pop(0)
+ try:
+ return self.safe_get(an_object[key], key_list, allow_sparse_dict=allow_sparse_dict)
+ except KeyError as exc:
+ # error, key not found
+ if allow_sparse_dict:
+ return None
+ raise exc
+ except TypeError as exc:
+ # error, we were expecting a dict or NAElement
+ if allow_sparse_dict and an_object is None:
+ return None
+ raise exc
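
A short sketch of safe_get() on a hand-built nested dict; note that the key list is consumed as the traversal recurses, so pass a fresh list per call:

    na_helper = NetAppModule()
    record = {'svm': {'name': 'vs1'}, 'space': {'size': 1048576}}

    assert na_helper.safe_get(record, ['svm', 'name']) == 'vs1'
    # missing leaf: None by default ...
    assert na_helper.safe_get(record, ['svm', 'uuid']) is None
    # ... or a KeyError when an exact match is required
    try:
        na_helper.safe_get(record, ['svm', 'uuid'], allow_sparse_dict=False)
    except KeyError:
        pass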
+
+ def filter_out_none_entries(self, list_or_dict):
+ """take a dict or list as input and return a dict/list without keys/elements whose values are None
+ skip empty dicts or lists.
+ """
+
+ if isinstance(list_or_dict, dict):
+ result = dict()
+ for key, value in list_or_dict.items():
+ if isinstance(value, (list, dict)):
+ sub = self.filter_out_none_entries(value)
+ if sub:
+ # skip empty dict or list
+ result[key] = sub
+ elif value is not None:
+ # skip None value
+ result[key] = value
+ return result
+
+ if isinstance(list_or_dict, list):
+ alist = list()
+ for item in list_or_dict:
+ if isinstance(item, (list, dict)):
+ sub = self.filter_out_none_entries(item)
+ if sub:
+ # skip empty dict or list
+ alist.append(sub)
+ elif item is not None:
+ # skip None value
+ alist.append(item)
+ return alist
+
+ raise TypeError('unexpected type %s' % type(list_or_dict))
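
And a last sketch for the None-filtering helper, using made-up module parameters:

    na_helper = NetAppModule()
    params = {'name': 'vol1', 'comment': None, 'qos': {'policy': None}, 'tags': ['prod', None], 'size': 0}

    assert na_helper.filter_out_none_entries(params) == {'name': 'vol1', 'tags': ['prod'], 'size': 0}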
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
new file mode 100644
index 00000000..b0b3f774
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
@@ -0,0 +1,160 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support class for NetApp ansible modules
+
+ Provides access to application resources using REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class RestApplication():
+ """Helper methods to manage application and application components"""
+ def __init__(self, rest_api, svm_name, app_name):
+ self.svm_name = svm_name
+ self.app_name = app_name
+ self.app_uuid = None
+ self.rest_api = rest_api
+
+ def _set_application_uuid(self):
+ """Use REST application/applications to get application uuid"""
+ api = '/application/applications'
+ query = {'return_timeout': 30, 'return_records': 'true', 'svm.name': self.svm_name, 'name': self.app_name}
+ response, error = self.rest_api.get(api, query)
+ record, error = rrh.check_for_0_or_1_records(api, response, error, query)
+ if error is None and record is not None:
+ self.app_uuid = record['uuid']
+ return None, error
+
+ def get_application_uuid(self):
+ """Use REST application/applications to get application uuid"""
+ error = None
+ if self.app_uuid is None:
+ dummy, error = self._set_application_uuid()
+ return self.app_uuid, error
+
+ def get_application_details(self):
+ """Use REST application/applications to get application details"""
+ uuid, error = self.get_application_uuid()
+ if error:
+ return uuid, error
+ if uuid is None: # not found
+ return None, None
+ api = '/application/applications/%s' % uuid
+ response, error = self.rest_api.get(api)
+ return response, rrh.api_error(api, error)
+
+ def create_application(self, body):
+ """Use REST application/applications san template to create one or more LUNs"""
+ self.fail_if_uuid()
+ api = '/application/applications'
+ query = {'return_timeout': 30, 'return_records': 'true'}
+ response, error = self.rest_api.post(api, body, params=query)
+ return rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
+
+ def create_application_body(self, template_name, template_body, smart_container=True):
+ if not isinstance(smart_container, bool):
+ error = "expecting bool value for smart_container, got: %s" % smart_container
+ return None, error
+ body = {
+ 'name': self.app_name,
+ 'svm': {'name': self.svm_name},
+ 'smart_container': smart_container,
+ template_name: template_body
+ }
+ return body, None
+
+ def delete_application(self):
+ """Use REST application/applications to delete app"""
+ self.fail_if_no_uuid()
+ api = '/application/applications/%s' % self.app_uuid
+ query = {'return_timeout': 30}
+ response, error = self.rest_api.delete(api, params=query)
+ response, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
+ self.app_uuid = None
+ return response, error
+
+ def get_application_components(self):
+ """Use REST application/applications to get application components"""
+ self.fail_if_no_uuid()
+ api = '/application/applications/%s/components' % self.app_uuid
+ response, error = self.rest_api.get(api)
+ return response, rrh.api_error(api, error)
+
+ def get_application_component_uuid(self):
+ """Use REST application/applications to get component uuid
+ Assume a single component per application
+ """
+ self.fail_if_no_uuid()
+ response, error = self.get_application_components()
+ record, error = rrh.check_for_0_or_1_records(None, response, error, None)
+ if error is None and record is not None:
+ return record['uuid'], None
+ return None, error
+
+ def get_application_component_details(self, comp_uuid=None):
+ """Use REST application/applications to get application components"""
+ self.fail_if_no_uuid()
+ if comp_uuid is None:
+ # assume a single component
+ comp_uuid, error = self.get_application_component_uuid()
+ if error:
+ return comp_uuid, error
+ if comp_uuid is None:
+ error = 'no component for application %s' % self.app_name
+ return None, error
+ api = '/application/applications/%s/components/%s' % (self.app_uuid, comp_uuid)
+ response, error = self.rest_api.get(api)
+ return response, rrh.api_error(api, error)
+
+ def get_application_component_backing_storage(self):
+ """Use REST application/applications to get component uuid.
+
+ Assume a single component per application
+ """
+ self.fail_if_no_uuid()
+ response, error = self.get_application_component_details()
+ if error or response is None:
+ return response, error
+ return response['backing_storage'], None
+
+ def fail_if_no_uuid(self):
+ """Prevent a logic error."""
+ if self.app_uuid is None:
+ msg = 'function should not be called before application uuid is set.'
+ return None, msg
+
+ def fail_if_uuid(self):
+ """Prevent a logic error."""
+ if self.app_uuid is not None:
+ msg = 'function should not be called when application uuid is set.'
+ return None, msg
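
To make the request body concrete, here is a small sketch of create_application_body(); the 'nas' template name and its payload are purely illustrative, not a statement about which templates ONTAP accepts. rest_api is not needed for body building, so None is passed here.

    app = RestApplication(rest_api=None, svm_name='svm1', app_name='app1')

    body, error = app.create_application_body('nas', {'nfs_access': [{'access': 'rw'}]})
    assert error is None
    assert body == {
        'name': 'app1',
        'svm': {'name': 'svm1'},
        'smart_container': True,
        'nas': {'nfs_access': [{'access': 'rw'}]},
    }

    # a non-boolean smart_container is reported as an error string rather than an exception
    body, error = app.create_application_body('nas', {}, smart_container='yes')
    assert body is None and error.startswith('expecting bool')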
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py
new file mode 100644
index 00000000..edebcf60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py
@@ -0,0 +1,93 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def api_error(api, error):
+ """format error message for api error, if error is present"""
+ if error is not None:
+ return "calling: %s: got %s" % (api, error)
+ return None
+
+
+def no_response_error(api, response):
+ """format error message for empty response"""
+ return "calling: %s: no response %s" % (api, repr(response))
+
+
+def job_error(response, error):
+ """format error message for job error"""
+ return "job reported error: %s, received %s" % (error, repr(response))
+
+
+def unexpected_response_error(api, response, query=None):
+ """format error message for reponse not matching expectations"""
+ msg = "calling: %s: unexpected response %s" % (api, repr(response))
+ if query:
+ msg += " for query: %s" % repr(query)
+ return response, msg
+
+
+def check_for_0_or_1_records(api, response, error, query=None):
+ """return None if no record was returned by the API
+ return record if one record was returned by the API
+ return error otherwise (error, no response, more than 1 record)
+ """
+ if error:
+ if api:
+ return None, api_error(api, error)
+ return None, error
+ if not response:
+ return None, no_response_error(api, response)
+ if response['num_records'] == 0:
+ return None, None # not found
+ if response['num_records'] != 1:
+ return unexpected_response_error(api, response, query)
+ return response['records'][0], None
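
A few representative inputs for the record-count helper above; the responses are hand-built dicts in the shape the ONTAP REST API returns, and the endpoint name is illustrative.

    # no match
    assert check_for_0_or_1_records('storage/volumes', {'num_records': 0}, None) == (None, None)

    # exactly one match: the record itself is handed back
    record, error = check_for_0_or_1_records('storage/volumes', {'num_records': 1, 'records': [{'name': 'vol1'}]}, None)
    assert record == {'name': 'vol1'} and error is None

    # more than one match is reported as an error
    record, error = check_for_0_or_1_records('storage/volumes', {'num_records': 2, 'records': []}, None, query={'name': 'vol*'})
    assert 'unexpected response' in error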
+
+
+def check_for_error_and_job_results(api, response, error, rest_api):
+ """report first error if present
+ otherwise call wait_on_job and retrieve job response or error
+ """
+ if error:
+ error = api_error(api, error)
+ elif 'job' in response:
+ job_response, error = rest_api.wait_on_job(response['job'])
+ if error:
+ error = job_error(response, error)
+ else:
+ response['job_response'] = job_response
+ return response, error
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py
new file mode 100644
index 00000000..a1a0c50c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py
@@ -0,0 +1,133 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ''' Support functions for NetApp ansible modules
+
+ Provides access to SVM (vserver) resources using ZAPI calls
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+def get_vserver(svm_cx, vserver_name):
+ """
+ Return vserver information.
+
+ :return:
+ vserver object if vserver found
+ None if vserver is not found
+ :rtype: object/None
+ """
+ vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-info', **{'vserver-name': vserver_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+
+ result = svm_cx.invoke_successfully(vserver_info, enable_tunneling=False)
+ vserver_details = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ vserver_info = attributes_list.get_child_by_name('vserver-info')
+ aggr_list = list()
+ # vserver aggr-list can be empty by default
+ get_list = vserver_info.get_child_by_name('aggr-list')
+ if get_list is not None:
+ aggregates = get_list.get_children()
+ for aggr in aggregates:
+ aggr_list.append(aggr.get_content())
+
+ protocols = list()
+ # allowed-protocols is not empty for data SVM, but is for node SVM
+ allowed_protocols = vserver_info.get_child_by_name('allowed-protocols')
+ if allowed_protocols is not None:
+ get_protocols = allowed_protocols.get_children()
+ for protocol in get_protocols:
+ protocols.append(protocol.get_content())
+ vserver_details = {'name': vserver_info.get_child_content('vserver-name'),
+ 'root_volume': vserver_info.get_child_content('root-volume'),
+ 'root_volume_aggregate': vserver_info.get_child_content('root-volume-aggregate'),
+ 'root_volume_security_style': vserver_info.get_child_content('root-volume-security-style'),
+ 'subtype': vserver_info.get_child_content('vserver-subtype'),
+ 'aggr_list': aggr_list,
+ 'language': vserver_info.get_child_content('language'),
+ 'quota_policy': vserver_info.get_child_content('quota-policy'),
+ 'snapshot_policy': vserver_info.get_child_content('snapshot-policy'),
+ 'allowed_protocols': protocols,
+ 'ipspace': vserver_info.get_child_content('ipspace'),
+ 'comment': vserver_info.get_child_content('comment')}
+
+ return vserver_details
+
+
+def modify_vserver(svm_cx, module, name, modify, parameters=None):
+ '''
+ Modify vserver.
+ :param name: vserver name
+ :param modify: list of modify attributes
+ :param parameters: customer original inputs
+ modify only contains the difference between the customer inputs and current
+ for some attributes, it may be safer to apply the original inputs
+ '''
+ if parameters is None:
+ parameters = modify
+
+ vserver_modify = netapp_utils.zapi.NaElement('vserver-modify')
+ vserver_modify.add_new_child('vserver-name', name)
+ for attribute in modify:
+ if attribute == 'comment':
+ vserver_modify.add_new_child('comment', parameters['comment'])
+ if attribute == 'language':
+ vserver_modify.add_new_child('language', parameters['language'])
+ if attribute == 'quota_policy':
+ vserver_modify.add_new_child('quota-policy', parameters['quota_policy'])
+ if attribute == 'snapshot_policy':
+ vserver_modify.add_new_child('snapshot-policy', parameters['snapshot_policy'])
+ if attribute == 'allowed_protocols':
+ allowed_protocols = netapp_utils.zapi.NaElement('allowed-protocols')
+ for protocol in parameters['allowed_protocols']:
+ allowed_protocols.add_new_child('protocol', protocol)
+ vserver_modify.add_child_elem(allowed_protocols)
+ if attribute == 'aggr_list':
+ aggregates = netapp_utils.zapi.NaElement('aggr-list')
+ for aggr in parameters['aggr_list']:
+ aggregates.add_new_child('aggr-name', aggr)
+ vserver_modify.add_child_elem(aggregates)
+ try:
+ svm_cx.invoke_successfully(vserver_modify, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ module.fail_json(msg='Error modifying SVM %s: %s' % (name, to_native(exc)),
+ exception=traceback.format_exc())
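
The two helpers above are typically driven from a module such as na_ontap_svm: the module fetches the current state with get_vserver(), derives the modify dict with NetAppModule.get_modified_attributes(), then calls modify_vserver(). A sketch of that middle step, with a hand-built 'current' dict mirroring what get_vserver() returns (the values are illustrative):

    from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule

    na_helper = NetAppModule()
    current = {'name': 'vs1', 'language': 'c.utf_8', 'allowed_protocols': ['nfs'], 'comment': None}
    desired = {'name': 'vs1', 'language': 'c.utf_8', 'allowed_protocols': ['nfs', 'cifs'], 'comment': 'prod SVM'}

    modify = na_helper.get_modified_attributes(current, desired)
    assert modify == {'allowed_protocols': ['nfs', 'cifs'], 'comment': 'prod SVM'}

    # with a live ZAPI connection the call would then be:
    # modify_vserver(svm_cx, module, 'vs1', modify, parameters=desired)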
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py
new file mode 100644
index 00000000..f4aeb945
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_active_directory
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP configure active directory
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.9.0
+description:
+ - Configure Active Directory
+
+options:
+ state:
+ description:
+ - Whether the Active Directory should exist or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ account_name:
+ description:
+ - Active Directory account NetBIOS name.
+ required: true
+ type: str
+
+ admin_password:
+ description:
+ - Administrator password required for Active Directory account creation.
+ required: true
+ type: str
+
+ admin_username:
+ description:
+ - Administrator username required for Active Directory account creation.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Fully qualified domain name.
+ type: str
+
+ force_account_overwrite:
+ description:
+ - If true and a machine account with the same name as specified in 'account_name' exists in Active Directory, it will be overwritten and reused.
+ type: bool
+
+ organizational_unit:
+ description:
+ - Organizational unit under which the Active Directory account will be created.
+ type: str
+'''
+EXAMPLES = """
+-
+ name: Ontap test
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ tasks:
+ - name: run ontap active directory
+ na_ontap_active_directory:
+ hostname: 10.193.78.219
+ username: admin
+ password: netapp1!
+ https: True
+ validate_certs: False
+ vserver: laurentncluster-1
+ state: present
+ account_name: carchi
+ admin_password: password
+ admin_username: carchi
+ domain: 12
+"""
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapActiveDirectory(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ vserver=dict(required=True, type='str'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ account_name=dict(required=True, type='str'),
+ admin_password=dict(required=True, type='str', no_log=True),
+ admin_username=dict(required=True, type='str'),
+ domain=dict(type="str", default=None),
+ force_account_overwrite=dict(type="bool", default=None),
+ organizational_unit=dict(type="str", default=None)
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_active_directory(self):
+ active_directory_iter = netapp_utils.zapi.NaElement('active-directory-account-get-iter')
+ active_directory_info = netapp_utils.zapi.NaElement('active-directory-account-config')
+ active_directory_info.add_new_child('account-name', self.parameters['account_name'])
+ active_directory_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(active_directory_info)
+ active_directory_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(active_directory_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching for Active Directory %s: %s' %
+ (self.parameters['account_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return result.get_child_by_name('attributes-list').get_child_by_name('active-directory-account-config')
+ return None
+
+ def create_active_directory(self):
+ active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-create')
+ active_directory_obj.add_new_child('account-name', self.parameters['account_name'])
+ active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
+ active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
+ if self.parameters.get('domain'):
+ active_directory_obj.add_new_child('domain', self.parameters['domain'])
+ if self.parameters.get('force_account_overwrite'):
+ active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
+ if self.parameters.get('organizational_unit'):
+ active_directory_obj.add_new_child('organizational-unit', self.parameters['organizational_unit'])
+ try:
+ result = self.server.invoke_successfully(active_directory_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating Active Directory account %s: %s' %
+ (self.parameters['account_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_active_directory(self):
+ active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-delete')
+ active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
+ active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
+ try:
+ result = self.server.invoke_successfully(active_directory_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting Active Directory account %s: %s' %
+ (self.parameters['account_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_active_directory(self):
+ active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-modify')
+ active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
+ active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
+ if self.parameters.get('domain'):
+ active_directory_obj.add_new_child('domain', self.parameters['domain'])
+ if self.parameters.get('force_account_overwrite'):
+ active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
+ try:
+ result = self.server.invoke_successfully(active_directory_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying Active Directory account %s: %s' %
+ (self.parameters['account_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_active_directory")
+ current = self.get_active_directory()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = None
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ # TODO add Modify
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_active_directory()
+ elif cd_action == 'delete':
+ self.delete_active_directory()
+ elif modify:
+ self.modify_active_directory()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapActiveDirectory()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py
new file mode 100644
index 00000000..3161ee1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py
@@ -0,0 +1,824 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_aggregate
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_aggregate
+short_description: NetApp ONTAP manage aggregates.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or manage aggregates on ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ service_state:
+ description:
+ - Whether the specified aggregate should be enabled or disabled. Creates the aggregate if it doesn't exist.
+ choices: ['online', 'offline']
+ type: str
+
+ name:
+ description:
+ - The name of the aggregate to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the aggregate to be renamed.
+ type: str
+ version_added: 2.7.0
+
+ nodes:
+ description:
+ - Node(s) for the aggregate to be created on. If no node is specified, the home node of the management LIF will be used.
+ - If multiple nodes are specified, a striped aggregate will be created.
+ type: list
+ elements: str
+
+ disk_type:
+ description:
+ - Type of disk to use to build aggregate
+ choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']
+ type: str
+ version_added: 2.7.0
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+ - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
+ - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
+ - Required when C(state=present).
+ - Modifiable only if specified disk_count is larger than current disk_count.
+ - Cannot create raidgroup with 1 disk when using raid type raid4.
+ - If the disk_count % raid_size == 1, only disk_count/raid_size * raid_size will be added.
+ - If disk_count is 6, raid_type is raid4, raid_size 4, all 6 disks will be added.
+ - If disk_count is 5, raid_type is raid4, raid_size 4, 5/4 * 4 = 4 will be added. 1 will not be added.
+ type: int
+
+ disk_size:
+ description:
+ - Disk size to use in 4K block size. Disks within 10% of specified size will be used.
+ type: int
+ version_added: 2.7.0
+
+ disk_size_with_unit:
+ description:
+ - Disk size to use in the specified unit.
+ - It is a positive integer number followed by unit of T/G/M/K. For example, 72G, 1T and 32M.
+ - This option is ignored if a specific list of disks is specified through the "disks" parameter.
+ - Use only one of the "disk-size" or "disk-size-with-unit" parameters.
+ type: str
+
+ raid_size:
+ description:
+ - Sets the maximum number of drives per raid group.
+ type: int
+ version_added: 2.7.0
+
+ raid_type:
+ description:
+ - Specifies the type of RAID groups to use in the new aggregate.
+ - raid_0 is only available on ONTAP Select.
+ choices: ['raid4', 'raid_dp', 'raid_tec', 'raid_0']
+ type: str
+ version_added: 2.7.0
+
+ unmount_volumes:
+ description:
+ - If set to "TRUE", this option specifies that all of the volumes hosted by the given aggregate are to be unmounted
+ - before the offline operation is executed.
+ - By default, the system will reject any attempt to offline an aggregate that hosts one or more online volumes.
+ type: bool
+
+ disks:
+ description:
+ - Specific list of disks to use for the new aggregate.
+ - To create a "mirrored" aggregate with a specific list of disks, both 'disks' and 'mirror_disks' options must be supplied.
+ Additionally, the same number of disks must be supplied in both lists.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ is_mirrored:
+ description:
+ - Specifies that the new aggregate be mirrored (have two plexes).
+ - If set to true, then the indicated disks will be split across the two plexes. By default, the new aggregate will not be mirrored.
+ - This option cannot be used when a specific list of disks is supplied with either the 'disks' or 'mirror_disks' options.
+ type: bool
+ version_added: 2.8.0
+
+ mirror_disks:
+ description:
+ - List of mirror disks to use. It must contain the same number of disks specified in 'disks'.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ spare_pool:
+ description:
+ - Specifies the spare pool from which to select spare disks to use in creation of a new aggregate.
+ choices: ['Pool0', 'Pool1']
+ type: str
+ version_added: 2.8.0
+
+ wait_for_online:
+ description:
+ - Set this parameter to 'true' for synchronous execution during create (wait until aggregate status is online)
+ - Set this parameter to 'false' for asynchronous execution
+ - For asynchronous, execution exits as soon as the request is sent, without checking aggregate status
+ type: bool
+ default: false
+ version_added: 2.8.0
+
+ time_out:
+ description:
+ - time to wait for aggregate creation in seconds
+ - default is set to 100 seconds
+ type: int
+ default: 100
+ version_added: 2.8.0
+
+ object_store_name:
+ description:
+ - Name of the object store configuration attached to the aggregate
+ type: str
+ version_added: 2.9.0
+
+ snaplock_type:
+ description:
+ - Type of snaplock for the aggregate being created.
+ choices: ['compliance', 'enterprise', 'non_snaplock']
+ type: str
+ version_added: 20.1.0
+
+ ignore_pool_checks:
+ description:
+ - only valid when I(disks) option is used.
+ - disks in a plex should belong to the same spare pool, and mirror disks to another spare pool.
+ - when set to true, these checks are ignored.
+ type: bool
+ version_added: 20.8.0
+'''
+
+EXAMPLES = """
+- name: Create Aggregates and wait 5 minutes until aggregate is online
+ na_ontap_aggregate:
+ state: present
+ service_state: online
+ name: ansibleAggr
+ disk_count: 1
+ wait_for_online: True
+ time_out: 300
+ snaplock_type: non_snaplock
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates
+ na_ontap_aggregate:
+ state: present
+ service_state: offline
+ unmount_volumes: true
+ name: ansibleAggr
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Attach object store
+ na_ontap_aggregate:
+ state: present
+ name: aggr4
+ object_store_name: sgws_305
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename Aggregates
+ na_ontap_aggregate:
+ state: present
+ service_state: online
+ from_name: ansibleAggr
+ name: ansibleAggr2
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete Aggregates
+ na_ontap_aggregate:
+ state: absent
+ service_state: offline
+ unmount_volumes: true
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapAggregate(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ disks=dict(required=False, type='list', elements='str'),
+ disk_count=dict(required=False, type='int', default=None),
+ disk_size=dict(required=False, type='int'),
+ disk_size_with_unit=dict(required=False, type='str'),
+ disk_type=dict(required=False, choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']),
+ from_name=dict(required=False, type='str'),
+ mirror_disks=dict(required=False, type='list', elements='str'),
+ nodes=dict(required=False, type='list', elements='str'),
+ is_mirrored=dict(required=False, type='bool'),
+ raid_size=dict(required=False, type='int'),
+ raid_type=dict(required=False, choices=['raid4', 'raid_dp', 'raid_tec', 'raid_0']),
+ service_state=dict(required=False, choices=['online', 'offline']),
+ spare_pool=dict(required=False, choices=['Pool0', 'Pool1']),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ unmount_volumes=dict(required=False, type='bool'),
+ wait_for_online=dict(required=False, type='bool', default=False),
+ time_out=dict(required=False, type='int', default=100),
+ object_store_name=dict(required=False, type='str'),
+ snaplock_type=dict(required=False, type='str', choices=['compliance', 'enterprise', 'non_snaplock']),
+ ignore_pool_checks=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('service_state', 'offline', ['unmount_volumes']),
+ ],
+ mutually_exclusive=[
+ ('is_mirrored', 'disks'),
+ ('is_mirrored', 'mirror_disks'),
+ ('is_mirrored', 'spare_pool'),
+ ('spare_pool', 'disks'),
+ ('disk_count', 'disks'),
+ ('disk_size', 'disk_size_with_unit')
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.using_vserver_msg = None # This module should be run as cluster admin
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('mirror_disks') is not None and self.parameters.get('disks') is None:
+ self.module.fail_json(msg="mirror_disks require disks options to be set")
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def aggr_get_iter(self, name):
+ """
+ Return aggr-get-iter query results
+ :param name: Name of the aggregate
+ :return: NaElement if aggregate found, None otherwise
+ """
+
+ aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-attributes', **{'aggregate-name': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ aggr_get_iter.add_child_elem(query)
+ result = None
+ try:
+ result = self.server.invoke_successfully(aggr_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 13040 denotes an aggregate not being found.
+ if to_native(error.code) == "13040":
+ pass
+ else:
+ msg = to_native(error)
+ if self.using_vserver_msg is not None:
+ msg += '. Added info: %s.' % self.using_vserver_msg
+ self.module.fail_json(msg=msg, exception=traceback.format_exc())
+ return result
+
+ def get_aggr(self, name=None):
+ """
+ Fetch details if aggregate exists.
+ :param name: Name of the aggregate to be fetched
+ :return:
+ Dictionary of current details if aggregate found
+ None if aggregate is not found
+ """
+ if name is None:
+ name = self.parameters['name']
+ aggr_get = self.aggr_get_iter(name)
+ if (aggr_get and aggr_get.get_child_by_name('num-records') and
+ int(aggr_get.get_child_content('num-records')) >= 1):
+ current_aggr = dict()
+ attr = aggr_get.get_child_by_name('attributes-list').get_child_by_name('aggr-attributes')
+ current_aggr['service_state'] = attr.get_child_by_name('aggr-raid-attributes').get_child_content('state')
+ if attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'):
+ current_aggr['disk_count'] = int(attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'))
+ return current_aggr
+ return None
+
+ def disk_get_iter(self, name):
+ """
+ Return storage-disk-get-iter query results
+ Filter disk list by aggregate name, and only reports disk-name and plex-name
+ :param name: Name of the aggregate
+ :return: NaElement
+ """
+
+ disk_get_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ query_details = {
+ 'query': {
+ 'storage-disk-info': {
+ 'disk-raid-info': {
+ 'disk-aggregate-info': {
+ 'aggregate-name': name
+ }
+ }
+ }
+ }
+ }
+ disk_get_iter.translate_struct(query_details)
+ attributes = {
+ 'desired-attributes': {
+ 'storage-disk-info': {
+ 'disk-name': None,
+ 'disk-raid-info': {
+ 'disk-aggregate-info': {
+ 'plex-name': None
+ }
+ }
+ }
+ }
+ }
+ disk_get_iter.translate_struct(attributes)
+
+ result = None
+ try:
+ result = self.server.invoke_successfully(disk_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ return result
+
+ def get_aggr_disks(self, name):
+ """
+ Fetch disks that are used for this aggregate.
+ :param name: Name of the aggregate to be fetched
+ :return:
+ list of tuples (disk-name, plex-name)
+ empty list if aggregate is not found
+ """
+ disks = list()
+ aggr_get = self.disk_get_iter(name)
+ if (aggr_get and aggr_get.get_child_by_name('num-records') and
+ int(aggr_get.get_child_content('num-records')) >= 1):
+ attr = aggr_get.get_child_by_name('attributes-list')
+ disks = [(disk_info.get_child_content('disk-name'),
+ disk_info.get_child_by_name('disk-raid-info').get_child_by_name('disk-aggregate-info').get_child_content('plex-name'))
+ for disk_info in attr.get_children()]
+ return disks
+
+ def object_store_get_iter(self, name):
+ """
+ Return aggr-object-store-get query results
+ :return: NaElement if object-store for given aggregate found, None otherwise
+ """
+
+ object_store_get_iter = netapp_utils.zapi.NaElement('aggr-object-store-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'object-store-information', **{'object-store-name': self.parameters.get('object_store_name'),
+ 'aggregate': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ object_store_get_iter.add_child_elem(query)
+ result = None
+ try:
+ result = self.server.invoke_successfully(object_store_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ return result
+
+ def get_object_store(self, name):
+ """
+ Fetch details if object store attached to the given aggregate exists.
+ :return:
+ Dictionary of current details if object store attached to the given aggregate is found
+ None if object store is not found
+ """
+ object_store_get = self.object_store_get_iter(name)
+ if (object_store_get and object_store_get.get_child_by_name('num-records') and
+ int(object_store_get.get_child_content('num-records')) >= 1):
+ current_object_store = dict()
+ attr = object_store_get.get_child_by_name('attributes-list').\
+ get_child_by_name('object-store-information')
+ current_object_store['object_store_name'] = attr.get_child_content('object-store-name')
+ return current_object_store
+ return None
+
+ def aggregate_online(self):
+ """
+ Set state of an offline aggregate to online
+ :return: None
+ """
+ online_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-online', **{'aggregate': self.parameters['name'],
+ 'force-online': 'true'})
+ try:
+ self.server.invoke_successfully(online_aggr,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
+ (self.parameters['name'], self.parameters['service_state'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def aggregate_offline(self):
+ """
+ Set state of an online aggregate to offline
+ :return: None
+ """
+ offline_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-offline', **{'aggregate': self.parameters['name'],
+ 'force-offline': 'false',
+ 'unmount-volumes': str(self.parameters['unmount_volumes'])})
+ try:
+ self.server.invoke_successfully(offline_aggr, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
+ (self.parameters['name'], self.parameters['service_state'], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def get_disks_or_mirror_disks_object(name, disks):
+ '''
+ create ZAPI object for disks or mirror_disks
+ '''
+ disks_obj = netapp_utils.zapi.NaElement(name)
+ for disk in disks:
+ disk_info_obj = netapp_utils.zapi.NaElement('disk-info')
+ disk_info_obj.add_new_child('name', disk)
+ disks_obj.add_child_elem(disk_info_obj)
+ return disks_obj
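
To visualise the structure built by the static helper above (this sketch assumes netapp-lib is installed, the same prerequisite the module itself has; the disk names are made up):

    disks_obj = NetAppOntapAggregate.get_disks_or_mirror_disks_object('disks', ['1.0.1', '1.0.2'])
    # disks_obj corresponds roughly to this ZAPI fragment:
    # <disks>
    #   <disk-info><name>1.0.1</name></disk-info>
    #   <disk-info><name>1.0.2</name></disk-info>
    # </disks>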
+
+ def create_aggr(self):
+ """
+ Create aggregate
+ :return: None
+ """
+ options = {'aggregate': self.parameters['name']}
+ if self.parameters.get('disk_count'):
+ options['disk-count'] = str(self.parameters['disk_count'])
+ if self.parameters.get('disk_type'):
+ options['disk-type'] = self.parameters['disk_type']
+ if self.parameters.get('raid_size'):
+ options['raid-size'] = str(self.parameters['raid_size'])
+ if self.parameters.get('raid_type'):
+ options['raid-type'] = self.parameters['raid_type']
+ if self.parameters.get('disk_size'):
+ options['disk-size'] = str(self.parameters['disk_size'])
+ if self.parameters.get('disk_size_with_unit'):
+ options['disk-size-with-unit'] = str(self.parameters['disk_size_with_unit'])
+ if self.parameters.get('is_mirrored'):
+ options['is-mirrored'] = str(self.parameters['is_mirrored'])
+ if self.parameters.get('spare_pool'):
+ options['spare-pool'] = self.parameters['spare_pool']
+ if self.parameters.get('raid_type'):
+ options['raid-type'] = self.parameters['raid_type']
+ if self.parameters.get('snaplock_type'):
+ options['snaplock-type'] = self.parameters['snaplock_type']
+ if self.parameters.get('ignore_pool_checks'):
+ options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks'])
+ aggr_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-create', **options)
+ if self.parameters.get('nodes'):
+ nodes_obj = netapp_utils.zapi.NaElement('nodes')
+ aggr_create.add_child_elem(nodes_obj)
+ for node in self.parameters['nodes']:
+ nodes_obj.add_new_child('node-name', node)
+ if self.parameters.get('disks'):
+ aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('disks', self.parameters.get('disks')))
+ if self.parameters.get('mirror_disks'):
+ aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', self.parameters.get('mirror_disks')))
+
+ try:
+ self.server.invoke_successfully(aggr_create, enable_tunneling=False)
+ if self.parameters.get('wait_for_online'):
+                # poll every 10 seconds; convert time_out (seconds) into a retry count, rounded to the nearest multiple of 10
+ retries = (self.parameters['time_out'] + 5) / 10
+ current = self.get_aggr()
+ status = None if current is None else current['service_state']
+ while status != 'online' and retries > 0:
+ time.sleep(10)
+ retries = retries - 1
+ current = self.get_aggr()
+ status = None if current is None else current['service_state']
+ else:
+ current = self.get_aggr()
+ if current is not None and current.get('disk_count') != self.parameters.get('disk_count'):
+ self.module.exit_json(changed=self.na_helper.changed,
+ warnings="Aggregate created with mismatched disk_count: created %s not %s"
+ % (current.get('disk_count'), self.parameters.get('disk_count')))
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error provisioning aggregate %s: %s"
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_aggr(self):
+ """
+ Delete aggregate.
+ :return: None
+ """
+ aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-destroy', **{'aggregate': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(aggr_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_aggregate(self):
+ """
+ Rename aggregate.
+ """
+ aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-rename', **{'aggregate': self.parameters['from_name'],
+ 'new-aggregate-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(aggr_rename, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error renaming aggregate %s: %s"
+ % (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_aggr(self, modify):
+ """
+ Modify state of the aggregate
+ :param modify: dictionary of parameters to be modified
+ :return: None
+ """
+ if modify.get('service_state') == 'offline':
+ self.aggregate_offline()
+ else:
+ disk_size = 0
+ disk_size_with_unit = None
+ if modify.get('service_state') == 'online':
+ self.aggregate_online()
+ if modify.get('disk_size'):
+ disk_size = modify.get('disk_size')
+ if modify.get('disk_size_with_unit'):
+ disk_size_with_unit = modify.get('disk_size_with_unit')
+ if modify.get('disk_count'):
+ self.add_disks(modify['disk_count'], disk_size=disk_size, disk_size_with_unit=disk_size_with_unit)
+ if modify.get('disks_to_add') or modify.get('mirror_disks_to_add'):
+ self.add_disks(0, modify.get('disks_to_add'), modify.get('mirror_disks_to_add'))
+
+ def attach_object_store_to_aggr(self):
+ """
+ Attach object store to aggregate.
+ :return: None
+ """
+ attach_object_store = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-object-store-attach', **{'aggregate': self.parameters['name'],
+ 'object-store-name': self.parameters['object_store_name']})
+
+ try:
+ self.server.invoke_successfully(attach_object_store,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error attaching object store %s to aggregate %s: %s" %
+ (self.parameters['object_store_name'], self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def add_disks(self, count=0, disks=None, mirror_disks=None, disk_size=0, disk_size_with_unit=None):
+ """
+ Add additional disks to aggregate.
+ :return: None
+ """
+ options = {'aggregate': self.parameters['name']}
+ if count:
+ options['disk-count'] = str(count)
+ if disks and self.parameters.get('ignore_pool_checks'):
+ options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks'])
+ if disk_size:
+ options['disk-size'] = str(disk_size)
+ if disk_size_with_unit:
+ options['disk-size-with-unit'] = disk_size_with_unit
+ aggr_add = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-add', **options)
+ if disks:
+ aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('disks', disks))
+ if mirror_disks:
+ aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', mirror_disks))
+
+ try:
+ self.server.invoke_successfully(aggr_add,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding additional disks to aggregate %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ cserver = netapp_utils.get_cserver(self.server)
+ if cserver is None:
+ server = self.server
+ self.using_vserver_msg = netapp_utils.ERROR_MSG['no_cserver']
+ event_name += ':error_no_cserver'
+ else:
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=cserver)
+ netapp_utils.ems_log_event(event_name, server)
+
+ def map_plex_to_primary_and_mirror(self, plex_disks, disks, mirror_disks):
+ '''
+ we have N plexes, and disks, and maybe mirror_disks
+ we're trying to find which plex is used for disks, and which one, if applicable, for mirror_disks
+ :return: a tuple with the names of the two plexes (disks_plex, mirror_disks_plex)
+ the second one can be None
+ '''
+ disks_plex = None
+ mirror_disks_plex = None
+ error = None
+ for plex in plex_disks:
+ common = set(plex_disks[plex]).intersection(set(disks))
+ if common:
+ if disks_plex is None:
+ disks_plex = plex
+ else:
+ error = 'found overlapping plexes: %s and %s' % (disks_plex, plex)
+ if mirror_disks is not None:
+ common = set(plex_disks[plex]).intersection(set(mirror_disks))
+ if common:
+ if mirror_disks_plex is None:
+ mirror_disks_plex = plex
+ else:
+ error = 'found overlapping mirror plexes: %s and %s' % (mirror_disks_plex, plex)
+ if error is None:
+ # make sure we found a match
+ if disks_plex is None:
+                error = 'cannot match disks with current aggregate disks'
+            if mirror_disks is not None and mirror_disks_plex is None:
+                if error is not None:
+                    error += ', and cannot match mirror_disks with current aggregate disks'
+                else:
+                    error = 'cannot match mirror_disks with current aggregate disks'
+ if error:
+ self.module.fail_json(msg="Error mapping disks for aggregate %s: %s. Found: %s" %
+ (self.parameters['name'], error, str(plex_disks)))
+ return disks_plex, mirror_disks_plex
+
+ def get_disks_to_add(self, aggr_name, disks, mirror_disks):
+ '''
+ Get list of disks used by the aggregate, as primary and mirror.
+ Report error if:
+ the plexes in use cannot be matched with user inputs (we expect some overlap)
+ the user request requires some disks to be removed (not supported)
+ : return: a tuple of two lists of disks: disks_to_add, mirror_disks_to_add
+ '''
+ # let's see if we need to add disks
+ disks_in_use = self.get_aggr_disks(aggr_name)
+ # we expect a list of tuples (disk_name, plex_name), if there is a mirror, we should have 2 plexes
+ # let's get a list of disks for each plex
+ plex_disks = dict()
+ for disk_name, plex_name in disks_in_use:
+ plex_disks.setdefault(plex_name, []).append(disk_name)
+ # find who is who
+ disks_plex, mirror_disks_plex = self.map_plex_to_primary_and_mirror(plex_disks, disks, mirror_disks)
+ # Now that we know what is which, find what needs to be removed (error), and what needs to be added
+ disks_to_remove = [disk for disk in plex_disks[disks_plex] if disk not in disks]
+ if mirror_disks_plex:
+ disks_to_remove.extend([disk for disk in plex_disks[mirror_disks_plex] if disk not in mirror_disks])
+ if disks_to_remove:
+ error = 'these disks cannot be removed: %s' % str(disks_to_remove)
+ self.module.fail_json(msg="Error removing disks is not supported. Aggregate %s: %s. In use: %s" %
+ (aggr_name, error, str(plex_disks)))
+ # finally, what's to be added
+ disks_to_add = [disk for disk in disks if disk not in plex_disks[disks_plex]]
+ mirror_disks_to_add = list()
+ if mirror_disks_plex:
+ mirror_disks_to_add = [disk for disk in mirror_disks if disk not in plex_disks[mirror_disks_plex]]
+ if mirror_disks_to_add and not disks_to_add:
+ self.module.fail_json(msg="Error cannot add mirror disks %s without adding disks for aggregate %s. In use: %s" %
+ (str(mirror_disks_to_add), aggr_name, str(plex_disks)))
+ if disks_to_add or mirror_disks_to_add:
+ self.na_helper.changed = True
+
+ return disks_to_add, mirror_disks_to_add
+
+ def apply(self):
+ """
+ Apply action to the aggregate
+ :return: None
+ """
+ self.asup_log_for_cserver("na_ontap_aggregate")
+ object_store_cd_action = None
+ aggr_name = self.parameters['name']
+ current = self.get_aggr()
+ # rename and create are mutually exclusive
+ rename, cd_action, object_store_current = None, None, None
+ if self.parameters.get('from_name'):
+ old_aggr = self.get_aggr(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(old_aggr, current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming: aggregate %s does not exist" % self.parameters['from_name'])
+ if rename:
+ current = old_aggr
+ aggr_name = self.parameters['from_name']
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if cd_action is None and self.parameters.get('disks') and current is not None:
+ modify['disks_to_add'], modify['mirror_disks_to_add'] = \
+ self.get_disks_to_add(aggr_name, self.parameters['disks'], self.parameters.get('mirror_disks'))
+
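+        # disk_count is the total desired number of disks, while ZAPI aggr-add expects the
+        # number of disks to add, so the difference with the current count is computed below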
+ if modify.get('disk_count'):
+ if int(modify['disk_count']) < int(current['disk_count']):
+                self.module.fail_json(msg="specified disk_count is less than current disk_count. Only adding disks is allowed.")
+ else:
+ modify['disk_count'] = modify['disk_count'] - current['disk_count']
+
+ if self.parameters.get('object_store_name'):
+ object_store_current = None
+ if current:
+ object_store_current = self.get_object_store(aggr_name)
+ object_store_cd_action = self.na_helper.get_cd_action(object_store_current, self.parameters.get('object_store_name'))
+ if object_store_cd_action is None and object_store_current is not None\
+ and object_store_current['object_store_name'] != self.parameters.get('object_store_name'):
+ self.module.fail_json(msg='Error: object store %s is already associated with aggregate %s.' %
+ (object_store_current['object_store_name'], aggr_name))
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_aggr()
+ elif cd_action == 'delete':
+ self.delete_aggr()
+ else:
+ if rename:
+ self.rename_aggregate()
+ if modify:
+ self.modify_aggr(modify)
+ if object_store_cd_action == 'create':
+ self.attach_object_store_to_aggr()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create Aggregate class instance and invoke apply
+ :return: None
+ """
+ obj_aggr = NetAppOntapAggregate()
+ obj_aggr.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py
new file mode 100644
index 00000000..b5c37dc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+"""
+Enable, disable or modify the AutoSupport configuration
+"""
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Enable/Disable Autosupport"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_autosupport
+options:
+ state:
+ description:
+ - Specifies whether the AutoSupport daemon is present or absent.
+ - When this setting is absent, delivery of all AutoSupport messages is turned off.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ node_name:
+ description:
+ - The name of the filer that owns the AutoSupport Configuration.
+ required: true
+ type: str
+ transport:
+ description:
+ - The name of the transport protocol used to deliver AutoSupport messages
+ choices: ['http', 'https', 'smtp']
+ type: str
+ noteto:
+ description:
+ - Specifies up to five recipients of short AutoSupport e-mail messages.
+ elements: str
+ type: list
+ post_url:
+ description:
+ - The URL used to deliver AutoSupport messages via HTTP POST
+ type: str
+ mail_hosts:
+ description:
+ - List of mail server(s) used to deliver AutoSupport messages via SMTP.
+ - Both host names and IP addresses may be used as valid input.
+ type: list
+ elements: str
+ support:
+ description:
+ - Specifies whether AutoSupport notification to technical support is enabled.
+ type: bool
+ from_address:
+ description:
+      - Specify the e-mail address from which the node sends AutoSupport messages.
+ version_added: 2.8.0
+ type: str
+ partner_addresses:
+ description:
+ - Specifies up to five partner vendor recipients of full AutoSupport e-mail messages.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ to_addresses:
+ description:
+ - Specifies up to five recipients of full AutoSupport e-mail messages.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ proxy_url:
+ description:
+      - Specify an HTTP or HTTPS proxy if the 'transport' parameter is set to HTTP or HTTPS and your organization uses a proxy.
+ - If authentication is required, use the format "username:password@host:port".
+ version_added: 2.8.0
+ type: str
+ hostname_in_subject:
+ description:
+ - Specify whether the hostname of the node is included in the subject line of the AutoSupport message.
+ type: bool
+ version_added: 2.8.0
+short_description: NetApp ONTAP Autosupport
+version_added: 2.7.0
+
+"""
+
+EXAMPLES = """
+ - name: Enable autosupport
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ noteto: abc@def.com,def@ghi.com
+ mail_hosts: 1.2.3.4,5.6.7.8
+ support: False
+ post_url: url/1.0/post
+
+ - name: Modify autosupport proxy_url with password
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ proxy_url: username:password@host.com:8000
+
+ - name: Modify autosupport proxy_url without password
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ proxy_url: username@host.com:8000
+
+ - name: Disable autosupport
+ na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ node_name: test
+
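+  # Additional illustrative example; the addresses and node name below are placeholders
+  - name: Modify autosupport full message recipients
+    na_ontap_autosupport:
+      hostname: "{{ hostname }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      state: present
+      node_name: test
+      from_address: node1@example.com
+      to_addresses: support1@example.com,support2@example.com
+      partner_addresses: partner@example.com
+      hostname_in_subject: true
+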
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPasup(object):
+ """Class with autosupport methods"""
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ node_name=dict(required=True, type='str'),
+ transport=dict(required=False, type='str', choices=['smtp', 'http', 'https']),
+ noteto=dict(required=False, type='list', elements='str'),
+ post_url=dict(required=False, type='str'),
+ support=dict(required=False, type='bool'),
+ mail_hosts=dict(required=False, type='list', elements='str'),
+ from_address=dict(required=False, type='str'),
+ partner_addresses=dict(required=False, type='list', elements='str'),
+ to_addresses=dict(required=False, type='list', elements='str'),
+ proxy_url=dict(required=False, type='str'),
+ hostname_in_subject=dict(required=False, type='bool'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+        # map present/absent to the internal service_state values started/stopped (compared against is-enabled)
+ self.parameters['service_state'] = 'started' if self.parameters['state'] == 'present' else 'stopped'
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def set_playbook_zapi_key_map(self):
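+        """Map playbook option names to the corresponding ZAPI keys, grouped by value type."""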
+ self.na_helper.zapi_string_keys = {
+ 'node_name': 'node-name',
+ 'transport': 'transport',
+ 'post_url': 'post-url',
+ 'from_address': 'from',
+ 'proxy_url': 'proxy-url'
+ }
+ self.na_helper.zapi_list_keys = {
+ 'noteto': ('noteto', 'mail-address'),
+ 'mail_hosts': ('mail-hosts', 'string'),
+ 'partner_addresses': ('partner-address', 'mail-address'),
+ 'to_addresses': ('to', 'mail-address'),
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'support': 'is-support-enabled',
+ 'hostname_in_subject': 'is-node-in-subject'
+ }
+
+ def get_autosupport_config(self):
+ """
+ Invoke zapi - get current autosupport details
+ :return: dict()
+ """
+ asup_details = netapp_utils.zapi.NaElement('autosupport-config-get')
+ asup_details.add_new_child('node-name', self.parameters['node_name'])
+ asup_info = dict()
+ try:
+ result = self.server.invoke_successfully(asup_details, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='%s' % to_native(error),
+ exception=traceback.format_exc())
+ # zapi invoke successful
+ asup_attr_info = result.get_child_by_name('attributes').get_child_by_name('autosupport-config-info')
+ asup_info['service_state'] = 'started' if asup_attr_info['is-enabled'] == 'true' else 'stopped'
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ asup_info[item_key] = asup_attr_info[zapi_key]
+ for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+ asup_info[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+ value=asup_attr_info[zapi_key])
+ for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
+ parent, dummy = zapi_key
+ asup_info[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
+ zapi_parent=asup_attr_info.get_child_by_name(parent)
+ )
+ return asup_info
+
+ def modify_autosupport_config(self, modify):
+ """
+ Invoke zapi - modify autosupport config
+ @return: NaElement object / FAILURE with an error_message
+ """
+ asup_details = {'node-name': self.parameters['node_name']}
+ if modify.get('service_state'):
+ asup_details['is-enabled'] = 'true' if modify.get('service_state') == 'started' else 'false'
+ asup_config = netapp_utils.zapi.NaElement('autosupport-config-modify')
+ for item_key in modify:
+ if item_key in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(item_key)
+ asup_details[zapi_key] = modify[item_key]
+ elif item_key in self.na_helper.zapi_bool_keys:
+ zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
+ asup_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+ value=modify[item_key])
+ elif item_key in self.na_helper.zapi_list_keys:
+ parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
+ asup_config.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent=parent_key,
+ zapi_child=child_key,
+ data=modify.get(item_key)))
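+        # translate_struct turns the flat dict of ZAPI keys into child elements of the modify request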
+ asup_config.translate_struct(asup_details)
+ try:
+ return self.server.invoke_successfully(asup_config, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='%s' % to_native(error), exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_autosupport", cserver)
+
+ def apply(self):
+ """
+ Apply action to autosupport
+ """
+ current = self.get_autosupport_config()
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.modify_autosupport_config(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ asup_obj = NetAppONTAPasup()
+ asup_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
new file mode 100644
index 00000000..94be8ce5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_autosupport_invoke
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_autosupport_invoke
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP send AutoSupport message
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Send an AutoSupport message from a node
+
+options:
+
+ name:
+ description:
+ - The name of the node to send the message to.
+ - Not specifying this option invokes AutoSupport on all nodes in the cluster.
+ type: str
+
+ autosupport_message:
+ description:
+ - Text sent in the subject line of the AutoSupport message.
+ type: str
+ aliases:
+ - message
+ version_added: 20.8.0
+
+ type:
+ description:
+ - Type of AutoSupport Collection to Issue.
+ choices: ['test', 'performance', 'all']
+ default: 'all'
+ type: str
+
+ uri:
+ description:
+      - Send the AutoSupport message to the destination you specify instead of the configured destination.
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Send message
+ na_ontap_autosupport_invoke:
+ name: node1
+ message: invoked test autosupport rest
+ uri: http://1.2.3.4/delivery_uri
+ type: test
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
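+
+  # Illustrative example; omitting 'name' sends the message to all nodes in the cluster
+  - name: Send message to all nodes
+    na_ontap_autosupport_invoke:
+      autosupport_message: invoked test autosupport rest
+      type: test
+      hostname: "{{ hostname }}"
+      username: "{{ username }}"
+      password: "{{ password }}"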
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPasupInvoke(object):
+ ''' send ASUP message '''
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=False, type='str'),
+ autosupport_message=dict(required=False, type='str', aliases=["message"]),
+ type=dict(required=False, choices=[
+ 'test', 'performance', 'all'], default='all'),
+ uri=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # REST API should be used for ONTAP 9.6 or higher.
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if not HAS_NETAPP_LIB:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_nodes(self):
+ nodes = list()
+ node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
+ desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+ node_details_info = netapp_utils.zapi.NaElement('node-details-info')
+ node_details_info.add_new_child('node', '')
+ desired_attributes.add_child_elem(node_details_info)
+ node_obj.add_child_elem(desired_attributes)
+ try:
+ result = self.server.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ node_info = result.get_child_by_name('attributes-list')
+ if node_info is not None:
+ nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
+ return nodes
+
+ def send_zapi_message(self, params, node_name):
+ params['node-name'] = node_name
+ send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
+ try:
+ self.server.invoke_successfully(send_message, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
+ % (node_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def send_message(self):
+ params = dict()
+ if self.parameters.get('autosupport_message'):
+ params['message'] = self.parameters['autosupport_message']
+ if self.parameters.get('type'):
+ params['type'] = self.parameters['type']
+ if self.parameters.get('uri'):
+ params['uri'] = self.parameters['uri']
+
+ if self.use_rest:
+ if self.parameters.get('name'):
+ params['node.name'] = self.parameters['name']
+ node_name = params['node.name']
+ else:
+ node_name = '*'
+ api = 'support/autosupport/messages'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
+ % (node_name, error))
+ else:
+ if self.parameters.get('name'):
+ node_names = [self.parameters['name']]
+ else:
+ # simulate REST behavior by sending to all nodes in the cluster
+ node_names = self.get_nodes()
+ for name in node_names:
+ self.send_zapi_message(params, name)
+
+ def ems_log_event(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ return netapp_utils.ems_log_event("na_ontap_autosupport_invoke", cserver)
+
+ def apply(self):
+ if not self.use_rest:
+ self.ems_log_event()
+ if self.module.check_mode:
+ pass
+ else:
+ self.send_message()
+ self.module.exit_json(changed=True)
+
+
+def main():
+ message = NetAppONTAPasupInvoke()
+ message.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py
new file mode 100644
index 00000000..646ba410
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_broadcast_domain
+short_description: NetApp ONTAP manage broadcast domains.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, modify or delete an ONTAP broadcast domain.
+options:
+ state:
+ description:
+ - Whether the specified broadcast domain should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Specify the broadcast domain name.
+ required: true
+ aliases:
+ - broadcast_domain
+ type: str
+ from_name:
+ description:
+ - Specify the broadcast domain name to be split into new broadcast domain.
+ version_added: 2.8.0
+ type: str
+ mtu:
+ description:
+ - Specify the required mtu for the broadcast domain.
+ type: str
+ ipspace:
+ description:
+ - Specify the required ipspace for the broadcast domain.
+    - A domain ipspace cannot be modified after the domain has been created.
+ type: str
+ ports:
+ description:
+ - Specify the ports associated with this broadcast domain. Should be comma separated.
+ - It represents the expected state of a list of ports at any time.
+ - Add a port if it is specified in expected state but not in current state.
+ - Delete a port if it is specified in current state but not in expected state.
+ - For split action, it represents the ports to be split from current broadcast domain and added to the new broadcast domain.
+    - If all ports are removed or split from a broadcast domain, the broadcast domain will be deleted automatically.
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: create broadcast domain
+ na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1000
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
+ - name: modify broadcast domain
+ na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1100
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
+ - name: split broadcast domain
+ na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_name: ansible_domain
+ name: new_ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: khutton-vsim1:e0d-12
+ - name: delete broadcast domain
+ na_ontap_broadcast_domain:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ ipspace: Default
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapBroadcastDomain(object):
+ """
+    Creates, modifies and destroys a broadcast domain
+ """
+ def __init__(self):
+ """
+ Initialize the ONTAP Broadcast Domain class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', aliases=["broadcast_domain"]),
+ ipspace=dict(required=False, type='str'),
+ mtu=dict(required=False, type='str'),
+ ports=dict(required=False, type='list', elements='str'),
+ from_name=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def get_broadcast_domain(self, broadcast_domain=None):
+ """
+ Return details about the broadcast domain
+ :param broadcast_domain: specific broadcast domain to get.
+ :return: Details about the broadcast domain. None if not found.
+ :rtype: dict
+ """
+ if broadcast_domain is None:
+ broadcast_domain = self.parameters['name']
+ domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+ broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+ broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(broadcast_domain_info)
+ domain_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(domain_get_iter, True)
+ domain_exists = None
+ # check if broadcast_domain exists
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ domain_info = result.get_child_by_name('attributes-list').\
+ get_child_by_name('net-port-broadcast-domain-info')
+ domain_name = domain_info.get_child_content('broadcast-domain')
+ domain_mtu = domain_info.get_child_content('mtu')
+ domain_ipspace = domain_info.get_child_content('ipspace')
+ domain_ports = domain_info.get_child_by_name('ports')
+ if domain_ports is not None:
+ ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+ else:
+ ports = []
+ domain_exists = {
+ 'domain-name': domain_name,
+ 'mtu': domain_mtu,
+ 'ipspace': domain_ipspace,
+ 'ports': ports
+ }
+ return domain_exists
+
+ def create_broadcast_domain(self):
+ """
+ Creates a new broadcast domain
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-create')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ if self.parameters.get('mtu'):
+ domain_obj.add_new_child("mtu", self.parameters['mtu'])
+ if self.parameters.get('ports'):
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in self.parameters['ports']:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_broadcast_domain(self, broadcast_domain=None):
+ """
+ Deletes a broadcast domain
+ """
+ if broadcast_domain is None:
+ broadcast_domain = self.parameters['name']
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-destroy')
+ domain_obj.add_new_child("broadcast-domain", broadcast_domain)
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting broadcast domain %s: %s' %
+ (broadcast_domain, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_broadcast_domain(self):
+ """
+ Modifies ipspace and mtu options of a broadcast domain
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-modify')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+ if self.parameters.get('mtu'):
+ domain_obj.add_new_child("mtu", self.parameters['mtu'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def split_broadcast_domain(self):
+ """
+ split broadcast domain
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-split')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['from_name'])
+ domain_obj.add_new_child("new-broadcast-domain", self.parameters['name'])
+ if self.parameters.get('ports'):
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in self.parameters['ports']:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error splitting broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
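+        # if the split leaves the source broadcast domain without any ports, delete it (see documentation above)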
+ if len(self.get_broadcast_domain_ports(self.parameters['from_name'])) == 0:
+ self.delete_broadcast_domain(self.parameters['from_name'])
+
+ def modify_redirect(self, modify):
+ """
+ :param modify: modify attributes.
+ """
+ for attribute in modify.keys():
+ if attribute == 'mtu':
+ self.modify_broadcast_domain()
+ if attribute == 'ports':
+ self.modify_broadcast_domain_ports()
+
+ def get_modify_attributes(self, current, split):
+ """
+ :param current: current state.
+ :param split: True or False of split action.
+        :return: dict of attributes to modify, or None.
+ """
+ modify = None
+ if self.parameters['state'] == 'present':
+ # split already handled ipspace and ports.
+ if self.parameters.get('from_name'):
+ current = self.get_broadcast_domain(self.parameters['from_name'])
+ if split:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if modify.get('ipspace'):
+ del modify['ipspace']
+ if modify.get('ports'):
+ del modify['ports']
+ # ipspace can not be modified.
+ else:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if modify.get('ipspace'):
+                    self.module.fail_json(msg='A domain ipspace cannot be modified after the domain has been created.',
+ exception=traceback.format_exc())
+ return modify
+
+ def modify_broadcast_domain_ports(self):
+ """
+        Compare current and desired ports, and call the add or remove ports methods if needed.
+ :return: None.
+ """
+ current_ports = self.get_broadcast_domain_ports()
+ expect_ports = self.parameters['ports']
+        # if the user wants to remove all ports, simply delete the broadcast domain.
+ if len(expect_ports) == 0:
+ self.delete_broadcast_domain()
+ return
+ ports_to_remove = list(set(current_ports) - set(expect_ports))
+ ports_to_add = list(set(expect_ports) - set(current_ports))
+
+ if len(ports_to_add) > 0:
+ self.add_broadcast_domain_ports(ports_to_add)
+
+ if len(ports_to_remove) > 0:
+ self.delete_broadcast_domain_ports(ports_to_remove)
+
+ def add_broadcast_domain_ports(self, ports):
+ """
+ Creates new broadcast domain ports
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ if ports:
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_broadcast_domain_ports(self, ports):
+ """
+ Deletes broadcast domain ports
+ :param: ports to be deleted.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ if ports:
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_broadcast_domain_ports(self, broadcast_domain=None):
+ """
+ Return details about the broadcast domain ports.
+ :return: Details about the broadcast domain ports. None if not found.
+ :rtype: list
+ """
+ if broadcast_domain is None:
+ broadcast_domain = self.parameters['name']
+ domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+ broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+ broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(broadcast_domain_info)
+ domain_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(domain_get_iter, True)
+ ports = []
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+ domain_ports = domain_info.get_child_by_name('ports')
+ if domain_ports is not None:
+ ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+ return ports
+
+ def apply(self):
+ """
+ Run Module based on play book
+ """
+ self.asup_log_for_cserver("na_ontap_broadcast_domain")
+ current = self.get_broadcast_domain()
+ cd_action, split = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create':
+ # either create new domain or split domain.
+ if self.parameters.get('from_name'):
+ split = self.na_helper.is_rename_action(self.get_broadcast_domain(self.parameters['from_name']), current)
+ if split is None:
+                    self.module.fail_json(msg='A domain cannot be split if it does not exist.',
+ exception=traceback.format_exc())
+ if split:
+ cd_action = None
+ modify = self.get_modify_attributes(current, split)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if split:
+ self.split_broadcast_domain()
+ if cd_action == 'create':
+ self.create_broadcast_domain()
+ elif cd_action == 'delete':
+ self.delete_broadcast_domain()
+ elif modify:
+ self.modify_redirect(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP Broadcast Domain object and applies the requested action (create, delete or modify).
+ """
+ obj = NetAppOntapBroadcastDomain()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py
new file mode 100644
index 00000000..6a67beb5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_broadcast_domain_ports
+short_description: NetApp ONTAP manage broadcast domain ports
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add or remove ONTAP broadcast domain ports. Existing ports that are not listed are kept.
+options:
+ state:
+ description:
+ - Whether the specified broadcast domain should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ broadcast_domain:
+ description:
+ - Specify the broadcast_domain name
+ required: true
+ type: str
+ ipspace:
+ description:
+ - Specify the ipspace for the broadcast domain
+ type: str
+ ports:
+ description:
+ - Specify the list of ports to add to or remove from this broadcast domain.
+ required: true
+ type: list
+ elements: str
+
+'''
+
+EXAMPLES = """
+  - name: create broadcast domain ports
+    na_ontap_broadcast_domain_ports:
+      state: present
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"
+      broadcast_domain: 123kevin
+      ports: khutton-vsim1:e0d-13
+  - name: delete broadcast domain ports
+    na_ontap_broadcast_domain_ports:
+      state: absent
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"
+      broadcast_domain: 123kevin
+      ports: khutton-vsim1:e0d-13
+"""
+
+RETURN = """
+
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapBroadcastDomainPorts(object):
+ """
+    Creates and destroys broadcast domain ports
+ """
+ def __init__(self):
+ """
+        Initialize the ONTAP Broadcast Domain Ports class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ broadcast_domain=dict(required=True, type='str'),
+ ipspace=dict(required=False, type='str', default=None),
+ ports=dict(required=True, type='list', elements='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.state = parameters['state']
+ self.broadcast_domain = parameters['broadcast_domain']
+ self.ipspace = parameters['ipspace']
+ self.ports = parameters['ports']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def get_broadcast_domain_ports(self):
+ """
+        Return details about the ports of the broadcast domain set in self.broadcast_domain
+ :return: Details about the broadcast domain. None if not found.
+ :rtype: dict
+ """
+ domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+ broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+ broadcast_domain_info.add_new_child('broadcast-domain', self.broadcast_domain)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(broadcast_domain_info)
+ domain_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(domain_get_iter, True)
+ domain_exists = None
+ # check if broadcast domain exists
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+ domain_name = domain_info.get_child_content('broadcast-domain')
+ domain_ports = domain_info.get_child_by_name('ports')
+ if domain_ports is not None:
+ ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+ else:
+ ports = []
+ domain_exists = {
+ 'domain-name': domain_name,
+ 'ports': ports
+ }
+ return domain_exists
+
+ def create_broadcast_domain_ports(self, ports):
+ """
+ Creates new broadcast domain ports
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+ domain_obj.add_new_child("broadcast-domain", self.broadcast_domain)
+ if self.ipspace:
+ domain_obj.add_new_child("ipspace", self.ipspace)
+ if ports:
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' %
+ (self.broadcast_domain, to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_broadcast_domain_ports(self, ports):
+ """
+ Deletes broadcast domain ports
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+ domain_obj.add_new_child("broadcast-domain", self.broadcast_domain)
+ if self.ipspace:
+ domain_obj.add_new_child("ipspace", self.ipspace)
+ if ports:
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' %
+ (self.broadcast_domain, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Run Module based on play book
+ """
+ changed = False
+ broadcast_domain_details = self.get_broadcast_domain_ports()
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_broadcast_domain_ports", cserver)
+ if broadcast_domain_details is None:
+ self.module.fail_json(msg='Error broadcast domain not found: %s' % self.broadcast_domain)
+ if self.state == 'present': # execute create
+ ports_to_add = [port for port in self.ports if port not in broadcast_domain_details['ports']]
+ if len(ports_to_add) > 0:
+ if not self.module.check_mode:
+ self.create_broadcast_domain_ports(ports_to_add)
+ changed = True
+ elif self.state == 'absent': # execute delete
+ ports_to_delete = [port for port in self.ports if port in broadcast_domain_details['ports']]
+ if len(ports_to_delete) > 0:
+ if not self.module.check_mode:
+ self.delete_broadcast_domain_ports(ports_to_delete)
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP Broadcast Domain Ports object and runs the correct play task
+ """
+ obj = NetAppOntapBroadcastDomainPorts()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py
new file mode 100644
index 00000000..bf0c035e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP manage consistency group snapshot
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create consistency group snapshot for ONTAP volumes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cg_snapshot
+options:
+ state:
+ description:
+    - Whether the consistency group snapshot should be created.
+ default: present
+ type: str
+ vserver:
+ required: true
+ type: str
+ description:
+ - Name of the vserver.
+ volumes:
+ required: true
+ type: list
+ elements: str
+ description:
+    - A list of volumes in this filer that are part of this CG operation.
+ snapshot:
+ required: true
+ type: str
+ description:
+    - The name of the snapshot to be created in each volume.
+ timeout:
+ description:
+ - Timeout selector.
+ choices: ['urgent', 'medium', 'relaxed']
+ type: str
+ default: medium
+ snapmirror_label:
+ description:
+ - A human readable SnapMirror label to be attached with the consistency group snapshot copies.
+ type: str
+version_added: 2.7.0
+
+'''
+
+EXAMPLES = """
+  - name: create CG snapshot
+    na_ontap_cg_snapshot:
+      state: present
+      vserver: vserver_name
+      snapshot: snapshot_name
+      volumes: vol_name
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"
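+
+  # Illustrative example; volume names and label are placeholders
+  - name: create CG snapshot with SnapMirror label
+    na_ontap_cg_snapshot:
+      state: present
+      vserver: vserver_name
+      snapshot: cg_snapshot_1
+      volumes: [vol1, vol2]
+      snapmirror_label: daily
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"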
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCGSnapshot(object):
+ """
+ Methods to create CG snapshots
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present'),
+ vserver=dict(required=True, type='str'),
+ volumes=dict(required=True, type='list', elements='str'),
+ snapshot=dict(required=True, type='str'),
+ timeout=dict(required=False, type='str', choices=[
+ 'urgent', 'medium', 'relaxed'], default='medium'),
+ snapmirror_label=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ parameters = self.module.params
+
+ # set up variables
+ self.state = parameters['state']
+ self.vserver = parameters['vserver']
+ self.volumes = parameters['volumes']
+ self.snapshot = parameters['snapshot']
+ self.timeout = parameters['timeout']
+ self.snapmirror_label = parameters['snapmirror_label']
+ self.cgid = None
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.vserver)
+
+ def does_snapshot_exist(self, volume):
+ """
+ This is duplicated from na_ontap_snapshot
+ Checks to see if a snapshot exists or not
+        :return: dict of snapshot details if a snapshot exists, None if it doesn't
+ """
+ # TODO: Remove this method and import snapshot module and
+ # call get after re-factoring __init__ across all the modules
+ # we aren't importing now, since __init__ does a lot of Ansible setup
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
+ desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
+ snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
+ comment = netapp_utils.zapi.NaElement('comment')
+ # add more desired attributes that are allowed to be modified
+ snapshot_info.add_child_elem(comment)
+ desired_attr.add_child_elem(snapshot_info)
+ snapshot_obj.add_child_elem(desired_attr)
+ # compose query
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", self.snapshot)
+ snapshot_info_obj.add_new_child("volume", volume)
+ snapshot_info_obj.add_new_child("vserver", self.vserver)
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+ result = self.server.invoke_successfully(snapshot_obj, True)
+ return_value = None
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ snap_info = attributes_list.get_child_by_name('snapshot-info')
+ return_value = {'comment': snap_info.get_child_content('comment')}
+ return return_value
+
+ def cgcreate(self):
+ """
+ Calls cg-start and cg-commit (when cg-start succeeds)
+ """
+ started = self.cg_start()
+ if started:
+ if self.cgid is not None:
+ self.cg_commit()
+ else:
+ self.module.fail_json(msg="Error fetching CG ID for CG commit %s" % self.snapshot,
+ exception=traceback.format_exc())
+ return started
+
+ def cg_start(self):
+ """
+ For the given list of volumes, creates cg-snapshot
+ """
+ snapshot_started = False
+ cgstart = netapp_utils.zapi.NaElement("cg-start")
+ cgstart.add_new_child("snapshot", self.snapshot)
+ cgstart.add_new_child("timeout", self.timeout)
+ volume_list = netapp_utils.zapi.NaElement("volumes")
+ cgstart.add_child_elem(volume_list)
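+        # Only volumes that do not already have a snapshot with this name are added to
+        # the request; if none qualify, no cg-start call is made and nothing changes.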
+ for vol in self.volumes:
+ snapshot_exists = self.does_snapshot_exist(vol)
+ if snapshot_exists is None:
+ snapshot_started = True
+ volume_list.add_new_child("volume-name", vol)
+ if snapshot_started:
+ if self.snapmirror_label:
+ cgstart.add_new_child("snapmirror-label",
+ self.snapmirror_label)
+ try:
+ cgresult = self.server.invoke_successfully(
+ cgstart, enable_tunneling=True)
+ if cgresult.get_child_by_name('cg-id'):
+ self.cgid = cgresult['cg-id']
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating CG snapshot %s: %s" %
+ (self.snapshot, to_native(error)),
+ exception=traceback.format_exc())
+ return snapshot_started
+
+ def cg_commit(self):
+ """
+ When cg-start is successful, performs a cg-commit with the cg-id
+ """
+ cgcommit = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cg-commit', **{'cg-id': self.cgid})
+ try:
+ self.server.invoke_successfully(cgcommit,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error committing CG snapshot %s: %s" %
+ (self.snapshot, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Applies action from playbook'''
+ netapp_utils.ems_log_event("na_ontap_cg_snapshot", self.server)
+ if not self.module.check_mode:
+ changed = self.cgcreate()
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ '''Execute action from playbook'''
+ cg_obj = NetAppONTAPCGSnapshot()
+ cg_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py
new file mode 100644
index 00000000..dbf565a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# import untangle
+
+'''
+na_ontap_cifs
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+  - "Create, destroy, or modify (path) a CIFS share on ONTAP."
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cifs
+
+options:
+
+ path:
+ description:
+      The file system path that is shared through this CIFS share. The path is the full, user-visible path relative
+      to the vserver root; it may cross junction mount points. The path is in UTF-8 and uses a forward
+      slash as the directory separator.
+ required: false
+ type: str
+
+ vserver:
+ description:
+ - "Vserver containing the CIFS share."
+ required: true
+ type: str
+
+ share_name:
+ description:
+      The name of the CIFS share. The CIFS share name is a UTF-8 string in which the following characters are
+      illegal: control characters from 0x00 to 0x1F (both inclusive) and 0x22 (double quotes).
+ required: true
+ type: str
+
+ share_properties:
+ description:
+ - The list of properties for the CIFS share
+ required: false
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ symlink_properties:
+ description:
+ - The list of symlink properties for this CIFS share
+ required: false
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified CIFS share should exist or not."
+ required: false
+ type: str
+ default: present
+
+ vscan_fileop_profile:
+ choices: ['no_scan', 'standard', 'strict', 'writes_only']
+ description:
+ - Profile_set of file_ops to which vscan on access scanning is applicable.
+ required: false
+ type: str
+ version_added: 2.9.0
+
+short_description: NetApp ONTAP Manage cifs-share
+version_added: 2.6.0
+
+'''
+
+EXAMPLES = """
+ - name: Create CIFS share
+ na_ontap_cifs:
+ state: present
+ share_name: cifsShareName
+ path: /
+ vserver: vserverName
+ share_properties: browsable,oplocks
+ symlink_properties: read_only,enable
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete CIFS share
+ na_ontap_cifs:
+ state: absent
+ share_name: cifsShareName
+ vserver: vserverName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify path CIFS share
+ na_ontap_cifs:
+ state: present
+ share_name: pb_test
+ vserver: vserverName
+ path: /
+ share_properties: show_previous_versions
+ symlink_properties: disable
+ vscan_fileop_profile: no_scan
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCifsShare(object):
+ """
+ Methods to create/delete/modify(path) CIFS share
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ share_name=dict(required=True, type='str'),
+ path=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ share_properties=dict(required=False, type='list', elements='str'),
+ symlink_properties=dict(required=False, type='list', elements='str'),
+ vscan_fileop_profile=dict(required=False, type='str', choices=['no_scan', 'standard', 'strict', 'writes_only'])
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters.get('vserver'))
+
+ def get_cifs_share(self):
+ """
+ Return details about the cifs-share
+ :param:
+ name : Name of the cifs-share
+ :return: Details about the cifs-share. None if not found.
+ :rtype: dict
+ """
+ cifs_iter = netapp_utils.zapi.NaElement('cifs-share-get-iter')
+ cifs_info = netapp_utils.zapi.NaElement('cifs-share')
+ cifs_info.add_new_child('share-name', self.parameters.get('share_name'))
+ cifs_info.add_new_child('vserver', self.parameters.get('vserver'))
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(cifs_info)
+
+ cifs_iter.add_child_elem(query)
+
+ result = self.server.invoke_successfully(cifs_iter, True)
+
+ return_value = None
+ # check if query returns the expected cifs-share
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ properties_list = []
+ symlink_list = []
+ cifs_attrs = result.get_child_by_name('attributes-list').\
+ get_child_by_name('cifs-share')
+ if cifs_attrs.get_child_by_name('share-properties'):
+ properties_attrs = cifs_attrs['share-properties']
+ if properties_attrs is not None:
+ properties_list = [property.get_content() for property in properties_attrs.get_children()]
+ if cifs_attrs.get_child_by_name('symlink-properties'):
+ symlink_attrs = cifs_attrs['symlink-properties']
+ if symlink_attrs is not None:
+ symlink_list = [symlink.get_content() for symlink in symlink_attrs.get_children()]
+ return_value = {
+ 'share': cifs_attrs.get_child_content('share-name'),
+ 'path': cifs_attrs.get_child_content('path'),
+ 'share_properties': properties_list,
+ 'symlink_properties': symlink_list
+ }
+ if cifs_attrs.get_child_by_name('vscan-fileop-profile'):
+ return_value['vscan_fileop_profile'] = cifs_attrs['vscan-fileop-profile']
+
+ return return_value
+
+ def create_cifs_share(self):
+ """
+ Create CIFS share
+ """
+ options = {'share-name': self.parameters.get('share_name'),
+ 'path': self.parameters.get('path')}
+ cifs_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-share-create', **options)
+ if self.parameters.get('share_properties'):
+ property_attrs = netapp_utils.zapi.NaElement('share-properties')
+ cifs_create.add_child_elem(property_attrs)
+ for aproperty in self.parameters.get('share_properties'):
+ property_attrs.add_new_child('cifs-share-properties', aproperty)
+ if self.parameters.get('symlink_properties'):
+ symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
+ cifs_create.add_child_elem(symlink_attrs)
+ for symlink in self.parameters.get('symlink_properties'):
+ symlink_attrs.add_new_child('cifs-share-symlink-properties', symlink)
+ if self.parameters.get('vscan_fileop_profile'):
+ fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
+ fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
+ cifs_create.add_child_elem(fileop_attrs)
+
+ try:
+ self.server.invoke_successfully(cifs_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+
+ self.module.fail_json(msg='Error creating cifs-share %s: %s'
+ % (self.parameters.get('share_name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_cifs_share(self):
+ """
+ Delete CIFS share
+ """
+ cifs_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-share-delete', **{'share-name': self.parameters.get('share_name')})
+
+ try:
+ self.server.invoke_successfully(cifs_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting cifs-share %s: %s'
+ % (self.parameters.get('share_name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_cifs_share(self):
+ """
+        Modify the path (or other supplied properties) of the given CIFS share
+ """
+ options = {'share-name': self.parameters.get('share_name')}
+ cifs_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-share-modify', **options)
+ if self.parameters.get('path'):
+ cifs_modify.add_new_child('path', self.parameters.get('path'))
+ if self.parameters.get('share_properties'):
+ property_attrs = netapp_utils.zapi.NaElement('share-properties')
+ cifs_modify.add_child_elem(property_attrs)
+ for aproperty in self.parameters.get('share_properties'):
+ property_attrs.add_new_child('cifs-share-properties', aproperty)
+ if self.parameters.get('symlink_properties'):
+ symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
+ cifs_modify.add_child_elem(symlink_attrs)
+ for aproperty in self.parameters.get('symlink_properties'):
+ symlink_attrs.add_new_child('cifs-share-symlink-properties', aproperty)
+ if self.parameters.get('vscan_fileop_profile'):
+ fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
+ fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
+ cifs_modify.add_child_elem(fileop_attrs)
+ try:
+ self.server.invoke_successfully(cifs_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying cifs-share %s: %s'
+ % (self.parameters.get('share_name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Apply action to cifs share'''
+ netapp_utils.ems_log_event("na_ontap_cifs", self.server)
+ current = self.get_cifs_share()
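+        # cd_action resolves to 'create', 'delete' or None; when it is None, the share
+        # may still need a modify, which is computed below from the current values.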
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = None
+ if cd_action is None:
+ # ZAPI accepts both 'show-previous-versions' and 'show_previous_versions', but only returns the latter
+ if 'show-previous-versions' in self.parameters.get('share_properties', []) and\
+ current and 'show_previous_versions' in current.get('share_properties', []):
+ self.parameters['share_properties'].remove('show-previous-versions')
+ self.parameters['share_properties'].append('show_previous_versions')
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_cifs_share()
+ elif cd_action == 'delete':
+ self.delete_cifs_share()
+ elif modify:
+ self.modify_cifs_share()
+ results = dict(changed=self.na_helper.changed)
+ if modify and netapp_utils.has_feature(self.module, 'show_modified'):
+ results['modify'] = str(modify)
+ self.module.exit_json(**results)
+
+
+def main():
+ '''Execute action from playbook'''
+ cifs_obj = NetAppONTAPCifsShare()
+ cifs_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py
new file mode 100644
index 00000000..90987afe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create or destroy or modify cifs-share-access-controls on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cifs_acl
+options:
+ permission:
+ choices: ['no_access', 'read', 'change', 'full_control']
+ type: str
+ description:
+    - "The access rights that the user or group has on the defined CIFS share."
+ share_name:
+ description:
+ - "The name of the cifs-share-access-control to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified CIFS share acl should exist or not."
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ user_or_group:
+ description:
+ - "The user or group name for which the permissions are listed."
+ required: true
+ type: str
+short_description: NetApp ONTAP manage cifs-share-access-control
+version_added: 2.6.0
+
+'''
+
+EXAMPLES = """
+ - name: Create CIFS share acl
+ na_ontap_cifs_acl:
+ state: present
+ share_name: cifsShareName
+ user_or_group: Everyone
+ permission: read
+ vserver: "{{ netapp_vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify CIFS share acl permission
+ na_ontap_cifs_acl:
+ state: present
+ share_name: cifsShareName
+ user_or_group: Everyone
+ permission: change
+ vserver: "{{ netapp_vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCifsAcl(object):
+ """
+ Methods to create/delete/modify CIFS share/user access-control
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ share_name=dict(required=True, type='str'),
+ user_or_group=dict(required=True, type='str'),
+ permission=dict(required=False, type='str', choices=['no_access', 'read', 'change', 'full_control'])
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['share_name', 'user_or_group', 'permission'])
+ ],
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.state = parameters['state']
+ self.vserver = parameters['vserver']
+ self.share_name = parameters['share_name']
+ self.user_or_group = parameters['user_or_group']
+ self.permission = parameters['permission']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_cifs_acl(self):
+ """
+ Return details about the cifs-share-access-control
+ :param:
+ name : Name of the cifs-share-access-control
+ :return: Details about the cifs-share-access-control. None if not found.
+ :rtype: dict
+ """
+ cifs_acl_iter = netapp_utils.zapi.NaElement('cifs-share-access-control-get-iter')
+ cifs_acl_info = netapp_utils.zapi.NaElement('cifs-share-access-control')
+ cifs_acl_info.add_new_child('share', self.share_name)
+ cifs_acl_info.add_new_child('user-or-group', self.user_or_group)
+ cifs_acl_info.add_new_child('vserver', self.vserver)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(cifs_acl_info)
+ cifs_acl_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(cifs_acl_iter, True)
+ return_value = None
+ # check if query returns the expected cifs-share-access-control
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+
+ cifs_acl = result.get_child_by_name('attributes-list').get_child_by_name('cifs-share-access-control')
+ return_value = {
+ 'share': cifs_acl.get_child_content('share'),
+ 'user-or-group': cifs_acl.get_child_content('user-or-group'),
+ 'permission': cifs_acl.get_child_content('permission')
+ }
+
+ return return_value
+
+ def create_cifs_acl(self):
+ """
+ Create access control for the given CIFS share/user-group
+ """
+ cifs_acl_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-share-access-control-create', **{'share': self.share_name,
+ 'user-or-group': self.user_or_group,
+ 'permission': self.permission})
+ try:
+ self.server.invoke_successfully(cifs_acl_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+
+ self.module.fail_json(msg='Error creating cifs-share-access-control %s: %s'
+ % (self.share_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_cifs_acl(self):
+ """
+ Delete access control for the given CIFS share/user-group
+ """
+ cifs_acl_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-share-access-control-delete', **{'share': self.share_name,
+ 'user-or-group': self.user_or_group})
+ try:
+ self.server.invoke_successfully(cifs_acl_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting cifs-share-access-control %s: %s'
+ % (self.share_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_cifs_acl_permission(self):
+ """
+ Change permission for the given CIFS share/user-group
+ """
+ cifs_acl_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-share-access-control-modify', **{'share': self.share_name,
+ 'user-or-group': self.user_or_group,
+ 'permission': self.permission})
+ try:
+ self.server.invoke_successfully(cifs_acl_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying cifs-share-access-control permission %s: %s'
+ % (self.share_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to cifs-share-access-control
+ """
+ changed = False
+ cifs_acl_exists = False
+ netapp_utils.ems_log_event("na_ontap_cifs_acl", self.server)
+ cifs_acl_details = self.get_cifs_acl()
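+        # Decision logic: an existing ACL with state=absent is deleted, an existing ACL
+        # with a different permission is modified, and a missing ACL with state=present
+        # is created; everything else is a no-op.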
+ if cifs_acl_details:
+ cifs_acl_exists = True
+ if self.state == 'absent': # delete
+ changed = True
+ elif self.state == 'present':
+                if cifs_acl_details['permission'] != self.permission:  # modify permission
+ changed = True
+ else:
+ if self.state == 'present': # create
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present': # execute create
+ if not cifs_acl_exists:
+ self.create_cifs_acl()
+ else: # execute modify
+ self.modify_cifs_acl_permission()
+ elif self.state == 'absent': # execute delete
+ self.delete_cifs_acl()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ cifs_acl = NetAppONTAPCifsAcl()
+ cifs_acl.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py
new file mode 100644
index 00000000..d1bb6b19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+""" this is cifs_server module
+
+ (c) 2018-2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_cifs_server
+short_description: NetApp ONTAP CIFS server configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+    - Create, delete, or modify a CIFS server.
+
+options:
+
+ state:
+ description:
+ - Whether the specified cifs_server should exist or not.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ service_state:
+ description:
+ - CIFS Server Administrative Status.
+ choices: ['stopped', 'started']
+ type: str
+
+ name:
+ description:
+ - Specifies the cifs_server name.
+ required: true
+ aliases: ['cifs_server_name']
+ type: str
+
+ admin_user_name:
+ description:
+ - Specifies the cifs server admin username.
+ - When used with absent, the account will be deleted if admin_password is also provided.
+ type: str
+
+ admin_password:
+ description:
+ - Specifies the cifs server admin password.
+ - When used with absent, the account will be deleted if admin_user_name is also provided.
+ type: str
+
+ domain:
+ description:
+ - The Fully Qualified Domain Name of the Windows Active Directory this CIFS server belongs to.
+ type: str
+
+ workgroup:
+ description:
+ - The NetBIOS name of the domain or workgroup this CIFS server belongs to.
+ type: str
+
+ ou:
+ description:
+ - The Organizational Unit (OU) within the Windows Active Directory
+ this CIFS server belongs to.
+ version_added: 2.7.0
+ type: str
+
+ force:
+ type: bool
+ description:
+ - If this is set and a machine account with the same name as
+ specified in 'name' exists in the Active Directory, it
+ will be overwritten and reused.
+ version_added: 2.7.0
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+ domain: "{{ id_domain }}"
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete cifs_server
+ na_ontap_cifs_server:
+ state: absent
+ name: data2
+ vserver: svm1
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Start cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: started
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Stop cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify cifs_server
+ na_ontap_cifs_server:
+ state: present
+ name: data2_new
+ vserver: svm1
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapcifsServer(object):
+ """
+ object to describe cifs_server info
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, choices=['stopped', 'started']),
+ name=dict(required=True, type='str', aliases=['cifs_server_name']),
+ workgroup=dict(required=False, type='str', default=None),
+ domain=dict(required=False, type='str'),
+ admin_user_name=dict(required=False, type='str'),
+ admin_password=dict(required=False, type='str', no_log=True),
+ ou=dict(required=False, type='str'),
+ force=dict(required=False, type='bool'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ params = self.module.params
+
+ # set up state variables
+ self.state = params['state']
+ self.cifs_server_name = params['name']
+ self.workgroup = params['workgroup']
+ self.domain = params['domain']
+ self.vserver = params['vserver']
+ self.service_state = params['service_state']
+ self.admin_user_name = params['admin_user_name']
+ self.admin_password = params['admin_password']
+ self.ou = params['ou']
+ self.force = params['force']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_cifs_server(self):
+ """
+ Return details about the CIFS-server
+ :param:
+            name : Name of the cifs_server
+
+ :return: Details about the cifs_server. None if not found.
+ :rtype: dict
+ """
+ cifs_server_info = netapp_utils.zapi.NaElement('cifs-server-get-iter')
+ cifs_server_attributes = netapp_utils.zapi.NaElement('cifs-server-config')
+ cifs_server_attributes.add_new_child('cifs-server', self.cifs_server_name)
+ cifs_server_attributes.add_new_child('vserver', self.vserver)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(cifs_server_attributes)
+ cifs_server_info.add_child_elem(query)
+ result = self.server.invoke_successfully(cifs_server_info, True)
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ cifs_server_attributes = result.get_child_by_name('attributes-list').\
+ get_child_by_name('cifs-server-config')
+ return_value = {
+ 'cifs_server_name': self.cifs_server_name,
+ 'administrative-status': cifs_server_attributes.get_child_content('administrative-status')
+ }
+
+ return return_value
+
+ def create_cifs_server(self):
+ """
+ calling zapi to create cifs_server
+ """
+ options = {'cifs-server': self.cifs_server_name, 'administrative-status': 'up'
+ if self.service_state == 'started' else 'down'}
+ if self.workgroup is not None:
+ options['workgroup'] = self.workgroup
+ if self.domain is not None:
+ options['domain'] = self.domain
+ if self.admin_user_name is not None:
+ options['admin-username'] = self.admin_user_name
+ if self.admin_password is not None:
+ options['admin-password'] = self.admin_password
+ if self.ou is not None:
+ options['organizational-unit'] = self.ou
+ if self.force is not None:
+ options['force-account-overwrite'] = str(self.force).lower()
+
+ cifs_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-server-create', **options)
+
+ try:
+ self.server.invoke_successfully(cifs_server_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error Creating cifs_server %s: %s' %
+ (self.cifs_server_name, to_native(exc)), exception=traceback.format_exc())
+
+ def delete_cifs_server(self):
+ """
+        calling zapi to delete cifs_server
+ """
+ if self.cifs_server_name == 'up':
+ self.modify_cifs_server(admin_status='down')
+
+ options = dict()
+ if self.admin_user_name is not None:
+ options['admin-username'] = self.admin_user_name
+ if self.admin_password is not None:
+ options['admin-password'] = self.admin_password
+
+ if options:
+ cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete', **options)
+ else:
+ cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete')
+
+ try:
+ self.server.invoke_successfully(cifs_server_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error deleting cifs_server %s: %s' % (self.cifs_server_name, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def modify_cifs_server(self, admin_status):
+ """
+        Modify the administrative status of the cifs_server.
+ """
+ cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-server-modify', **{'cifs-server': self.cifs_server_name,
+ 'administrative-status': admin_status, 'vserver': self.vserver})
+ try:
+ self.server.invoke_successfully(cifs_server_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def start_cifs_server(self):
+ """
+        Start the cifs_server.
+ """
+ cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-server-start')
+ try:
+ self.server.invoke_successfully(cifs_server_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def stop_cifs_server(self):
+ """
+        Stop the cifs_server.
+ """
+ cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-server-stop')
+ try:
+ self.server.invoke_successfully(cifs_server_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+        Apply the requested state to the cifs_server.
+ """
+
+ changed = False
+ cifs_server_exists = False
+ netapp_utils.ems_log_event("na_ontap_cifs_server", self.server)
+ cifs_server_detail = self.get_cifs_server()
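+        # A change is required when the requested service_state differs from the current
+        # administrative-status, when an existing server must be deleted, or when a
+        # missing server must be created.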
+
+ if cifs_server_detail:
+ cifs_server_exists = True
+
+ if self.state == 'present':
+ administrative_status = cifs_server_detail['administrative-status']
+ if self.service_state == 'started' and administrative_status == 'down':
+ changed = True
+ if self.service_state == 'stopped' and administrative_status == 'up':
+ changed = True
+ else:
+                # we will delete the CIFS server
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not cifs_server_exists:
+ self.create_cifs_server()
+
+ elif self.service_state == 'stopped':
+ self.stop_cifs_server()
+
+ elif self.service_state == 'started':
+ self.start_cifs_server()
+
+ elif self.state == 'absent':
+ self.delete_cifs_server()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ cifs_server = NetAppOntapcifsServer()
+ cifs_server.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py
new file mode 100644
index 00000000..f8f3ce59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py
@@ -0,0 +1,525 @@
+#!/usr/bin/python
+
+# (c) 2017-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_cluster
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_cluster
+short_description: NetApp ONTAP cluster - create a cluster and add/remove nodes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create ONTAP cluster.
+- Add or remove cluster nodes using cluster_ip_address.
+- Adding a node requires ONTAP 9.3 or better.
+- Removing a node requires ONTAP 9.4 or better.
+options:
+ state:
+ description:
+ - Whether the specified cluster should exist (deleting a cluster is not supported).
+ - Whether the node identified by its cluster_ip_address should be in the cluster or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ cluster_name:
+ description:
+ - The name of the cluster to manage.
+ type: str
+ cluster_ip_address:
+ description:
+    - Intracluster IP address of the node to be added or removed.
+ type: str
+ single_node_cluster:
+ description:
+ - Whether the cluster is a single node cluster. Ignored for 9.3 or older versions.
+    - If this option is present, it was observed that 'Cluster' interfaces were deleted, regardless of its value.
+ version_added: 19.11.0
+ type: bool
+ cluster_location:
+ description:
+ - Cluster location, only relevant if performing a modify action.
+ version_added: 19.11.0
+ type: str
+ cluster_contact:
+ description:
+ - Cluster contact, only relevant if performing a modify action.
+ version_added: 19.11.0
+ type: str
+ node_name:
+ description:
+ - Name of the node to be added or removed from the cluster.
+ - Be aware that when adding a node, '-' are converted to '_' by the ONTAP backend.
+ - When creating a cluster, C(node_name) is ignored.
+ - When adding a node using C(cluster_ip_address), C(node_name) is optional.
+ - When used to remove a node, C(cluster_ip_address) and C(node_name) are mutually exclusive.
+ version_added: 20.9.0
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create cluster
+ na_ontap_cluster:
+ state: present
+ cluster_name: new_cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add node to cluster (Join cluster)
+ na_ontap_cluster:
+ state: present
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add node to cluster (Join cluster)
+ na_ontap_cluster:
+ state: present
+ cluster_ip_address: 10.10.10.10
+ node_name: my_preferred_node_name
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create a 2 node cluster in one call
+ na_ontap_cluster:
+ state: present
+ cluster_name: new_cluster
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Remove node from cluster
+ na_ontap_cluster:
+ state: absent
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Remove node from cluster
+ na_ontap_cluster:
+ state: absent
+ node_name: node002
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: modify cluster
+ na_ontap_cluster:
+ state: present
+ cluster_contact: testing
+ cluster_location: testing
+ cluster_name: "{{ netapp_cluster}}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCluster(object):
+ """
+ object initialize and class methods
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ cluster_name=dict(required=False, type='str'),
+ cluster_ip_address=dict(required=False, type='str'),
+ cluster_location=dict(required=False, type='str'),
+ cluster_contact=dict(required=False, type='str'),
+ single_node_cluster=dict(required=False, type='bool'),
+ node_name=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.warnings = list()
+
+ if self.parameters['state'] == 'absent' and self.parameters.get('node_name') is not None and self.parameters.get('cluster_ip_address') is not None:
+ msg = 'when state is "absent", parameters are mutually exclusive: cluster_ip_address|node_name'
+ self.module.fail_json(msg=msg)
+
+ if self.parameters.get('node_name') is not None and '-' in self.parameters.get('node_name'):
+ self.warnings.append('ONTAP ZAPI converts "-" to "_", node_name: %s may be changed or not matched' % self.parameters.get('node_name'))
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_cluster_identity(self, ignore_error=True):
+ ''' get cluster information, but the cluster may not exist yet
+ return:
+ None if the cluster cannot be reached
+ a dictionary of attributes
+ '''
+ zapi = netapp_utils.zapi.NaElement('cluster-identity-get')
+ try:
+ result = self.server.invoke_successfully(zapi, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if ignore_error:
+ return None
+ self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ cluster_identity = dict()
+ if result.get_child_by_name('attributes'):
+ identity_info = result.get_child_by_name('attributes').get_child_by_name('cluster-identity-info')
+ if identity_info:
+ cluster_identity['cluster_contact'] = identity_info.get_child_content('cluster-contact')
+ cluster_identity['cluster_location'] = identity_info.get_child_content('cluster-location')
+ cluster_identity['cluster_name'] = identity_info.get_child_content('cluster-name')
+ return cluster_identity
+ return None
+
+ def get_cluster_nodes(self, ignore_error=True):
+ ''' get cluster node names, but the cluster may not exist yet
+ return:
+ None if the cluster cannot be reached
+ a list of nodes
+ '''
+ zapi = netapp_utils.zapi.NaElement('cluster-node-get-iter')
+ try:
+ result = self.server.invoke_successfully(zapi, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if ignore_error:
+ return None
+ self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ cluster_nodes = list()
+ if result.get_child_by_name('attributes-list'):
+ for node_info in result.get_child_by_name('attributes-list').get_children():
+ node_name = node_info.get_child_content('node-name')
+ if node_name is not None:
+ cluster_nodes.append(node_name)
+ return cluster_nodes
+ return None
+
+ def get_cluster_ip_addresses(self, cluster_ip_address, ignore_error=True):
+ ''' get list of IP addresses for this cluster
+ return:
+ a list of dictionaries
+ '''
+ if_infos = list()
+ zapi = netapp_utils.zapi.NaElement('net-interface-get-iter')
+ if cluster_ip_address is not None:
+ query = netapp_utils.zapi.NaElement('query')
+ net_info = netapp_utils.zapi.NaElement('net-interface-info')
+ net_info.add_new_child('address', cluster_ip_address)
+ query.add_child_elem(net_info)
+ zapi.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(zapi, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if ignore_error:
+ return if_infos
+ self.module.fail_json(msg='Error getting IP addresses: %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('attributes-list'):
+ for net_info in result.get_child_by_name('attributes-list').get_children():
+ if net_info:
+ if_info = dict()
+ if_info['address'] = net_info.get_child_content('address')
+ if_info['home_node'] = net_info.get_child_content('home-node')
+ if_infos.append(if_info)
+ return if_infos
+
+ def get_cluster_ip_address(self, cluster_ip_address, ignore_error=True):
+ ''' get node information if it is discoverable
+ return:
+ None if the cluster cannot be reached
+ a dictionary of attributes
+ '''
+ if cluster_ip_address is None:
+ return None
+ nodes = self.get_cluster_ip_addresses(cluster_ip_address, ignore_error=ignore_error)
+ return nodes if len(nodes) > 0 else None
+
+ def create_cluster(self, older_api=False):
+ """
+ Create a cluster
+ """
+ # Note: cannot use node_name here:
+ # 13001:The "-node-names" parameter must be used with either the "-node-uuids" or the "-cluster-ips" parameters.
+ options = {'cluster-name': self.parameters['cluster_name']}
+ if not older_api and self.parameters.get('single_node_cluster') is not None:
+ options['single-node-cluster'] = str(self.parameters['single_node_cluster']).lower()
+ cluster_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cluster-create', **options)
+ try:
+ self.server.invoke_successfully(cluster_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
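+            # Older ONTAP releases reject the single-node-cluster input; retry without it.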
+ if error.message == "Extra input: single-node-cluster" and not older_api:
+ return self.create_cluster(older_api=True)
+ # Error 36503 denotes node already being used.
+ if to_native(error.code) == "36503":
+ return False
+ self.module.fail_json(msg='Error creating cluster %s: %s'
+ % (self.parameters['cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return True
+
+ def add_node(self, older_api=False):
+ """
+ Add a node to an existing cluster
+        9.2 and 9.3 do not support cluster-ips, so fall back to node-ip
+ """
+ if self.parameters.get('cluster_ip_address') is not None:
+ cluster_add_node = netapp_utils.zapi.NaElement('cluster-add-node')
+ if older_api:
+ cluster_add_node.add_new_child('node-ip', self.parameters.get('cluster_ip_address'))
+ else:
+ cluster_ips = netapp_utils.zapi.NaElement('cluster-ips')
+ cluster_ips.add_new_child('ip-address', self.parameters.get('cluster_ip_address'))
+ cluster_add_node.add_child_elem(cluster_ips)
+ if self.parameters.get('node_name') is not None:
+ node_names = netapp_utils.zapi.NaElement('node-names')
+ node_names.add_new_child('string', self.parameters.get('node_name'))
+ cluster_add_node.add_child_elem(node_names)
+
+ else:
+ return False
+ try:
+ self.server.invoke_successfully(cluster_add_node, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if error.message == "Extra input: cluster-ips" and not older_api:
+ return self.add_node(older_api=True)
+ # skip if error says no failed operations to retry.
+ if to_native(error) == "NetApp API failed. Reason - 13001:There are no failed \"cluster create\" or \"cluster add-node\" operations to retry.":
+ return False
+ self.module.fail_json(msg='Error adding node with ip %s: %s'
+ % (self.parameters.get('cluster_ip_address'), to_native(error)),
+ exception=traceback.format_exc())
+ return True
+
+ def remove_node(self):
+ """
+ Remove a node from an existing cluster
+ """
+ cluster_remove_node = netapp_utils.zapi.NaElement('cluster-remove-node')
+ from_node = ''
+ # cluster-ip and node-name are mutually exclusive:
+ # 13115:Element "cluster-ip" within "cluster-remove-node" has been excluded by another element.
+ if self.parameters.get('cluster_ip_address') is not None:
+ cluster_remove_node.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address'))
+ from_node = 'IP: %s' % self.parameters.get('cluster_ip_address')
+ elif self.parameters.get('node_name') is not None:
+ cluster_remove_node.add_new_child('node', self.parameters.get('node_name'))
+ from_node = 'name: %s' % self.parameters.get('node_name')
+
+ try:
+ self.server.invoke_successfully(cluster_remove_node, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if error.message == "Unable to find API: cluster-remove-node":
+ msg = 'Error: ZAPI is not available. Removing a node requires ONTAP 9.4 or newer.'
+ self.module.fail_json(msg=msg)
+ self.module.fail_json(msg='Error removing node with %s: %s'
+ % (from_node, to_native(error)), exception=traceback.format_exc())
+
+ def modify_cluster_identity(self, modify):
+ """
+ Modifies the cluster identity
+ """
+ cluster_modify = netapp_utils.zapi.NaElement('cluster-identity-modify')
+ if modify.get('cluster_name') is not None:
+ cluster_modify.add_new_child("cluster-name", modify.get('cluster_name'))
+ if modify.get('cluster_location') is not None:
+ cluster_modify.add_new_child("cluster-location", modify.get('cluster_location'))
+ if modify.get('cluster_contact') is not None:
+ cluster_modify.add_new_child("cluster-contact", modify.get('cluster_contact'))
+
+ try:
+ self.server.invoke_successfully(cluster_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying cluster identity details %s: %s'
+ % (self.parameters['cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return True
+
+ def cluster_create_wait(self):
+ """
+ Wait whilst cluster creation completes
+ """
+
+ cluster_wait = netapp_utils.zapi.NaElement('cluster-create-join-progress-get')
+ is_complete = False
+ status = ''
+ wait = False # do not wait on the first call
+
+ while not is_complete and status not in ('failed', 'success'):
+ if wait:
+ time.sleep(10)
+ else:
+ wait = True
+ try:
+ result = self.server.invoke_successfully(cluster_wait, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+
+ self.module.fail_json(msg='Error creating cluster %s: %s'
+ % (self.parameters.get('cluster_name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ clus_progress = result.get_child_by_name('attributes')
+ result = clus_progress.get_child_by_name('cluster-create-join-progress-info')
+ is_complete = self.na_helper.get_value_for_bool(from_zapi=True,
+ value=result.get_child_content('is-complete'))
+ status = result.get_child_content('status')
+
+ if not is_complete and status != 'success':
+ current_status_message = result.get_child_content('current-status-message')
+
+ self.module.fail_json(
+ msg='Failed to create cluster %s: %s' % (self.parameters.get('cluster_name'), current_status_message))
+
+ return is_complete
+
+ def node_add_wait(self):
+ """
+ Wait whilst node is being added to the existing cluster
+ """
+ cluster_node_status = netapp_utils.zapi.NaElement('cluster-add-node-status-get-iter')
+ node_status_info = netapp_utils.zapi.NaElement('cluster-create-add-node-status-info')
+ node_status_info.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address'))
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(node_status_info)
+ cluster_node_status.add_child_elem(query)
+
+ is_complete = None
+ failure_msg = None
+ wait = False # do not wait on the first call
+
+ while is_complete != 'success' and is_complete != 'failure':
+ if wait:
+ time.sleep(10)
+ else:
+ wait = True
+ try:
+ result = self.server.invoke_successfully(cluster_node_status, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if error.message == "Unable to find API: cluster-add-node-status-get-iter":
+ # This API is not supported for 9.3 or earlier releases, just wait a bit
+ time.sleep(60)
+ return
+ self.module.fail_json(msg='Error adding node with ip address %s: %s'
+ % (self.parameters.get('cluster_ip_address'), to_native(error)),
+ exception=traceback.format_exc())
+
+ attributes_list = result.get_child_by_name('attributes-list')
+ join_progress = attributes_list.get_child_by_name('cluster-create-add-node-status-info')
+ is_complete = join_progress.get_child_content('status')
+ failure_msg = join_progress.get_child_content('failure-msg')
+
+ if is_complete != 'success':
+ if 'Node is already in a cluster' in failure_msg:
+ return
+ else:
+ self.module.fail_json(
+ msg='Error adding node with ip address %s' % (self.parameters.get('cluster_ip_address')))
+
+ def node_remove_wait(self):
+        ''' wait for node name or cluster IP address to disappear '''
+ node_name = self.parameters.get('node_name')
+ node_ip = self.parameters.get('cluster_ip_address')
+ timer = 180 # 180 seconds
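+        # Poll every 30 seconds until the node (matched by name or by cluster IP) no
+        # longer shows up, failing once the 180 second budget is exhausted.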
+ while timer > 0:
+ if node_name is not None and node_name not in self.get_cluster_nodes():
+ return
+ if node_ip is not None and self.get_cluster_ip_address(node_ip) is None:
+ return
+ time.sleep(30)
+ timer -= 30
+ self.module.fail_json(msg='Timeout waiting for node to be removed from cluster.')
+
+ def autosupport_log(self):
+ """
+ Autosupport log for cluster
+ :return:
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_cluster", cserver)
+
+ def apply(self):
+ """
+ Apply action to cluster
+ """
+ cluster_action = None
+ node_action = None
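+        # cluster_action covers creating the cluster itself (identity changes are handled
+        # separately as a modify); node_action covers joining or removing a node
+        # identified by cluster_ip_address or node_name.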
+
+ cluster_identity = self.get_cluster_identity(ignore_error=True)
+ if self.parameters.get('cluster_name') is not None:
+ cluster_action = self.na_helper.get_cd_action(cluster_identity, self.parameters)
+ if self.parameters.get('cluster_ip_address') is not None:
+ existing_interfaces = self.get_cluster_ip_address(self.parameters.get('cluster_ip_address'))
+ if self.parameters.get('state') == 'present':
+ node_action = 'add_node' if existing_interfaces is None else None
+ else:
+ node_action = 'remove_node' if existing_interfaces is not None else None
+ if self.parameters.get('node_name') is not None and self.parameters['state'] == 'absent':
+ nodes = self.get_cluster_nodes()
+ if self.parameters.get('node_name') in nodes:
+ node_action = 'remove_node'
+ modify = self.na_helper.get_modified_attributes(cluster_identity, self.parameters)
+
+ if node_action is not None:
+ self.na_helper.changed = True
+
+ if not self.module.check_mode:
+ if cluster_action == 'create':
+ if self.create_cluster():
+ self.cluster_create_wait()
+ if node_action == 'add_node':
+ if self.add_node():
+ self.node_add_wait()
+ elif node_action == 'remove_node':
+ self.remove_node()
+ self.node_remove_wait()
+ if modify:
+ self.modify_cluster_identity(modify)
+ self.autosupport_log()
+ self.module.exit_json(changed=self.na_helper.changed, warnings=self.warnings)
+
+
+def main():
+ """
+ Create object and call apply
+ """
+ cluster_obj = NetAppONTAPCluster()
+ cluster_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py
new file mode 100644
index 00000000..9a57a2bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Enable or disable HA on a cluster"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cluster_ha
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - "Whether HA on cluster should be enabled or disabled."
+ default: present
+short_description: NetApp ONTAP Manage HA status for cluster
+version_added: 2.6.0
+'''
+
+EXAMPLES = """
+ - name: "Enable HA status for cluster"
+ na_ontap_cluster_ha:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapClusterHA(object):
+ """
+ object initialize and class methods
+ """
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def modify_cluster_ha(self, configure):
+ """
+ Enable or disable HA on cluster
+ :return: None
+ """
+ cluster_ha_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cluster-ha-modify', **{'ha-configured': configure})
+ try:
+ self.server.invoke_successfully(cluster_ha_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying cluster HA to %s: %s'
+ % (configure, to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_cluster_ha_enabled(self):
+ """
+ Get current cluster HA details
+ :return: dict if enabled, None if disabled
+ """
+ cluster_ha_get = netapp_utils.zapi.NaElement('cluster-ha-get')
+ try:
+ result = self.server.invoke_successfully(cluster_ha_get,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError:
+ self.module.fail_json(msg='Error fetching cluster HA details',
+ exception=traceback.format_exc())
+ cluster_ha_info = result.get_child_by_name('attributes').get_child_by_name('cluster-ha-info')
+ if cluster_ha_info.get_child_content('ha-configured') == 'true':
+ return {'ha-configured': True}
+ return None
+
+ def apply(self):
+ """
+ Apply action to cluster HA
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_cluster_ha", cserver)
+ current = self.get_cluster_ha_enabled()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
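+        # get_cd_action maps the desired state onto 'create' (enable HA) or
+        # 'delete' (disable HA); None means HA is already in the requested state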
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.modify_cluster_ha("true")
+ elif cd_action == 'delete':
+ self.modify_cluster_ha("false")
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create object and call apply
+ """
+ ha_obj = NetAppOntapClusterHA()
+ ha_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py
new file mode 100644
index 00000000..3201770f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete cluster peer relations on ONTAP
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cluster_peer
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - Whether the specified cluster peer should exist or not.
+ default: present
+ source_intercluster_lifs:
+ description:
+ - List of intercluster addresses of the source cluster.
+ - Used as peer-addresses in destination cluster.
+ - All these intercluster lifs should belong to the source cluster.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ aliases:
+ - source_intercluster_lif
+ dest_intercluster_lifs:
+ description:
+ - List of intercluster addresses of the destination cluster.
+ - Used as peer-addresses in source cluster.
+ - All these intercluster lifs should belong to the destination cluster.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ aliases:
+ - dest_intercluster_lif
+ passphrase:
+ description:
+ - The arbitrary passphrase that matches the one given to the peer cluster.
+ type: str
+ source_cluster_name:
+ description:
+    - The name of the source cluster in the peer relation to be deleted.
+ type: str
+ dest_cluster_name:
+ description:
+    - The name of the destination cluster in the peer relation to be deleted.
+    - Required for delete.
+ type: str
+ dest_hostname:
+ description:
+ - Destination cluster IP or hostname which needs to be peered
+ - Required to complete the peering process at destination cluster.
+ required: True
+ type: str
+ dest_username:
+ description:
+ - Destination username.
+    - Optional if this is the same as the source username.
+ type: str
+ dest_password:
+ description:
+ - Destination password.
+    - Optional if this is the same as the source password.
+ type: str
+ ipspace:
+ description:
+ - IPspace of the local intercluster LIFs.
+ - Assumes Default IPspace if not provided.
+ type: str
+ version_added: '20.11.0'
+ encryption_protocol_proposed:
+ description:
+ - Encryption protocol to be used for inter-cluster communication.
+ - Only available on ONTAP 9.5 or later.
+ choices: ['tls_psk', 'none']
+ type: str
+ version_added: '20.5.0'
+short_description: NetApp ONTAP Manage Cluster peering
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: Create cluster peer
+ na_ontap_cluster_peer:
+ state: present
+ source_intercluster_lifs: 1.2.3.4,1.2.3.5
+ dest_intercluster_lifs: 1.2.3.6,1.2.3.7
+ passphrase: XXXX
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ dest_hostname: "{{ dest_netapp_hostname }}"
+ encryption_protocol_proposed: tls_psk
+
+ - name: Delete cluster peer
+ na_ontap_cluster_peer:
+ state: absent
+ source_cluster_name: test-source-cluster
+ dest_cluster_name: test-dest-cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ dest_hostname: "{{ dest_netapp_hostname }}"
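+
+ # Hypothetical example combining documented options; the IPspace name and the
+ # destination credential variables below are placeholders, not tested values.
+ - name: Create cluster peer in a custom IPspace with separate destination credentials
+   na_ontap_cluster_peer:
+     state: present
+     source_intercluster_lifs: 1.2.3.4,1.2.3.5
+     dest_intercluster_lifs: 1.2.3.6,1.2.3.7
+     passphrase: XXXX
+     ipspace: Ipspace1
+     hostname: "{{ netapp_hostname }}"
+     username: "{{ netapp_username }}"
+     password: "{{ netapp_password }}"
+     dest_hostname: "{{ dest_netapp_hostname }}"
+     dest_username: "{{ dest_netapp_username }}"
+     dest_password: "{{ dest_netapp_password }}"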
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPClusterPeer(object):
+ """
+ Class with cluster peer methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ source_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['source_intercluster_lif']),
+ dest_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['dest_intercluster_lif']),
+ passphrase=dict(required=False, type='str', no_log=True),
+ dest_hostname=dict(required=True, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True),
+ source_cluster_name=dict(required=False, type='str'),
+ dest_cluster_name=dict(required=False, type='str'),
+ ipspace=dict(required=False, type='str'),
+ encryption_protocol_proposed=dict(required=False, type='str', choices=['tls_psk', 'none'])
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=[['source_intercluster_lifs', 'dest_intercluster_lifs']],
+ required_if=[('state', 'absent', ['source_cluster_name', 'dest_cluster_name'])],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # set destination server connection
+ self.module.params['hostname'] = self.parameters['dest_hostname']
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # reset to source host connection for asup logs
+ self.module.params['hostname'] = self.parameters['hostname']
+ self.module.params['username'] = self.parameters['username']
+ self.module.params['password'] = self.parameters['password']
+
+ def cluster_peer_get_iter(self, cluster):
+ """
+        Compose NaElement object to query the given cluster using the peer cluster-name and peer-addresses parameters
+        :param cluster: type of cluster (source or destination)
+        :return: NaElement object for cluster-peer-get-iter with query
+ """
+ cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info')
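+        # the peer relation is stored from the remote cluster's point of view,
+        # so a query against the source cluster matches on the destination's
+        # LIFs and cluster name, and vice versa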
+ if cluster == 'source':
+ peer_lifs, peer_cluster = 'dest_intercluster_lifs', 'dest_cluster_name'
+ else:
+ peer_lifs, peer_cluster = 'source_intercluster_lifs', 'source_cluster_name'
+ if self.parameters.get(peer_lifs):
+ peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
+ for peer in self.parameters.get(peer_lifs):
+ peer_addresses.add_new_child('remote-inet-address', peer)
+ cluster_peer_info.add_child_elem(peer_addresses)
+ if self.parameters.get(peer_cluster):
+ cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster])
+ query.add_child_elem(cluster_peer_info)
+ cluster_peer_get.add_child_elem(query)
+ return cluster_peer_get
+
+ def cluster_peer_get(self, cluster):
+ """
+ Get current cluster peer info
+ :param cluster: type of cluster (source or destination)
+ :return: Dictionary of current cluster peer details if query successful, else return None
+ """
+ cluster_peer_get_iter = self.cluster_peer_get_iter(cluster)
+ result, cluster_info = None, dict()
+ if cluster == 'source':
+ server = self.server
+ else:
+ server = self.dest_server
+ try:
+ result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching cluster peer %s: %s'
+ % (self.parameters['dest_cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+ # return cluster peer details
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+ cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info')
+ cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name')
+ peers = cluster_peer_info.get_child_by_name('peer-addresses')
+ cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()]
+ return cluster_info
+ return None
+
+ def cluster_peer_delete(self, cluster):
+ """
+ Delete a cluster peer on source or destination
+ For source cluster, peer cluster-name = destination cluster name and vice-versa
+ :param cluster: type of cluster (source or destination)
+ :return:
+ """
+ if cluster == 'source':
+ server, peer_cluster_name = self.server, self.parameters['dest_cluster_name']
+ else:
+ server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name']
+ cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cluster-peer-delete', **{'cluster-name': peer_cluster_name})
+ try:
+ server.invoke_successfully(cluster_peer_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting cluster peer %s: %s'
+ % (peer_cluster_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def cluster_peer_create(self, cluster):
+ """
+ Create a cluster peer on source or destination
+ For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa
+ :param cluster: type of cluster (source or destination)
+ :return: None
+ """
+ cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create')
+ if self.parameters.get('passphrase') is not None:
+ cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase'])
+ peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
+ if cluster == 'source':
+ server, peer_address = self.server, self.parameters['dest_intercluster_lifs']
+ else:
+ server, peer_address = self.dest_server, self.parameters['source_intercluster_lifs']
+ for each in peer_address:
+ peer_addresses.add_new_child('remote-inet-address', each)
+ cluster_peer_create.add_child_elem(peer_addresses)
+ if self.parameters.get('encryption_protocol_proposed') is not None:
+ cluster_peer_create.add_new_child('encryption-protocol-proposed', self.parameters['encryption_protocol_proposed'])
+ if self.parameters.get('ipspace') is not None:
+ cluster_peer_create.add_new_child('ipspace-name', self.parameters['ipspace'])
+
+ try:
+ server.invoke_successfully(cluster_peer_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating cluster peer %s: %s'
+ % (peer_address, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to cluster peer
+ :return: None
+ """
+ self.asup_log_for_cserver("na_ontap_cluster_peer")
+ source = self.cluster_peer_get('source')
+ destination = self.cluster_peer_get('destination')
+ source_action = self.na_helper.get_cd_action(source, self.parameters)
+ destination_action = self.na_helper.get_cd_action(destination, self.parameters)
+ self.na_helper.changed = False
+ # create only if expected cluster peer relation is not present on both source and destination clusters
+ if source_action == 'create' and destination_action == 'create':
+ if not self.module.check_mode:
+ self.cluster_peer_create('source')
+ self.cluster_peer_create('destination')
+ self.na_helper.changed = True
+ # delete peer relation in cluster where relation is present
+ else:
+ if source_action == 'delete':
+ if not self.module.check_mode:
+ self.cluster_peer_delete('source')
+ self.na_helper.changed = True
+ if destination_action == 'delete':
+ if not self.module.check_mode:
+ self.cluster_peer_delete('destination')
+ self.na_helper.changed = True
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an AutoSupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ """
+ Execute action
+ :return: None
+ """
+ community_obj = NetAppONTAPClusterPeer()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
new file mode 100644
index 00000000..bb8bc364
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+'''
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Run system-cli commands on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_command
+short_description: NetApp ONTAP Run any CLI command; the username provided needs to have console login permission.
+version_added: 2.7.0
+options:
+ command:
+ description:
+ - a comma separated list containing the command and arguments.
+ required: true
+ type: list
+ elements: str
+ privilege:
+ description:
+ - privilege level at which to run the command.
+ choices: ['admin', 'advanced']
+ default: admin
+ type: str
+ version_added: 2.8.0
+ return_dict:
+ description:
+    - Returns a parseable dictionary instead of raw XML output
+    - C(result_value)
+    - C(status) > passed, failed.
+    - C(stdout) > command output in plaintext
+    - C(stdout_lines) > list of command output lines
+ - C(stdout_lines_filter) > empty list or list of command output lines matching I(include_lines) or I(exclude_lines) parameters.
+ type: bool
+ default: false
+ version_added: 2.9.0
+ vserver:
+ description:
+    - If running as vserver admin, you must give a I(vserver) or the module will fail
+ version_added: "19.10.0"
+ type: str
+ include_lines:
+ description:
+ - applied only when I(return_dict) is true
+ - return only lines containing string pattern in C(stdout_lines_filter)
+ default: ''
+ type: str
+ version_added: "19.10.0"
+ exclude_lines:
+ description:
+ - applied only when I(return_dict) is true
+    - return only lines that do not contain the string pattern in C(stdout_lines_filter)
+ default: ''
+ type: str
+ version_added: "19.10.0"
+'''
+
+EXAMPLES = """
+ - name: run ontap cli command
+ na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['version']
+
+  # Same as above, but returns a parseable dictionary
+ - name: run ontap cli command
+ na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['node', 'show', '-fields', 'node,health,uptime,model']
+ privilege: 'admin'
+ return_dict: true
+
+ # Same as above, but with lines filtering
+ - name: run ontap cli command
+ na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['node', 'show', '-fields', 'node,health,uptime,model']
+     exclude_lines: 'ode ' # Exclude lines containing 'Node ' or 'node '
+ privilege: 'admin'
+ return_dict: true
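+
+  # Hypothetical example of the line filtering options: keep only the output
+  # lines containing 'true' (reuses the command from the example above).
+ - name: run ontap cli command
+   na_ontap_command:
+     hostname: "{{ hostname }}"
+     username: "{{ admin username }}"
+     password: "{{ admin password }}"
+     command: ['node', 'show', '-fields', 'node,health,uptime,model']
+     include_lines: 'true'
+     privilege: 'admin'
+     return_dict: true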
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCommand(object):
+ ''' calls a CLI command '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ command=dict(required=True, type='list', elements='str'),
+ privilege=dict(required=False, type='str', choices=['admin', 'advanced'], default='admin'),
+ return_dict=dict(required=False, type='bool', default=False),
+ vserver=dict(required=False, type='str'),
+ include_lines=dict(required=False, type='str', default=''),
+ exclude_lines=dict(required=False, type='str', default=''),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.command = parameters['command']
+ self.privilege = parameters['privilege']
+ self.vserver = parameters['vserver']
+ self.return_dict = parameters['return_dict']
+ self.include_lines = parameters['include_lines']
+ self.exclude_lines = parameters['exclude_lines']
+
+ self.result_dict = dict()
+ self.result_dict['status'] = ""
+ self.result_dict['result_value'] = 0
+ self.result_dict['invoked_command'] = " ".join(self.command)
+ self.result_dict['stdout'] = ""
+ self.result_dict['stdout_lines'] = []
+ self.result_dict['stdout_lines_filter'] = []
+ self.result_dict['xml_dict'] = dict()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an AutoSupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ try:
+ netapp_utils.ems_log_event(event_name, cserver)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Cluster Admin required if -vserver is not passed %s: %s' %
+ (self.command, to_native(error)),
+ exception=traceback.format_exc())
+
+ def run_command(self):
+ ''' calls the ZAPI '''
+ self.ems()
+ command_obj = netapp_utils.zapi.NaElement("system-cli")
+
+ args_obj = netapp_utils.zapi.NaElement("args")
+ if self.return_dict:
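+            # prefix the command with 'set -showseparator "###"' so that column
+            # boundaries survive in the XML output; _format_escaped_data later
+            # converts '###' back into spaces when building stdout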
+ args_obj.add_new_child('arg', 'set')
+ args_obj.add_new_child('arg', '-showseparator')
+ args_obj.add_new_child('arg', '"###"')
+ args_obj.add_new_child('arg', ';')
+ for arg in self.command:
+ args_obj.add_new_child('arg', arg)
+ command_obj.add_child_elem(args_obj)
+ command_obj.add_new_child('priv', self.privilege)
+
+ try:
+ output = self.server.invoke_successfully(command_obj, True)
+ if self.return_dict:
+ # Parseable dict output
+ retval = self.parse_xml_to_dict(output.to_string())
+ else:
+ # Raw XML output
+ retval = output.to_string()
+
+ return retval
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error running command %s: %s' %
+ (self.command, to_native(error)),
+ exception=traceback.format_exc())
+
+ def ems(self):
+ """
+        Error out if a cluster admin username is used with a vserver, or a vserver admin is used without a vserver being set
+ :return:
+ """
+ if self.vserver:
+ ems_server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+ try:
+ netapp_utils.ems_log_event("na_ontap_command" + str(self.command), ems_server)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Vserver admin required if -vserver is given %s: %s' %
+ (self.command, to_native(error)),
+ exception=traceback.format_exc())
+ else:
+ self.asup_log_for_cserver("na_ontap_command: " + str(self.command))
+
+ def apply(self):
+ ''' calls the command and returns raw output '''
+ changed = True
+ output = self.run_command()
+ self.module.exit_json(changed=changed, msg=output)
+
+ def parse_xml_to_dict(self, xmldata):
+        '''Parse raw XML from system-cli and create an Ansible parseable dictionary'''
+ xml_import_ok = True
+ xml_parse_ok = True
+
+ try:
+ importing = 'ast'
+ import ast
+ importing = 'xml.parsers.expat'
+ import xml.parsers.expat
+ except ImportError:
+ self.result_dict['status'] = "XML parsing failed. Cannot import %s!" % importing
+ self.result_dict['stdout'] = str(xmldata)
+ self.result_dict['result_value'] = -1
+ xml_import_ok = False
+
+ if xml_import_ok:
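+            # newlines are encoded as '---' before the text is handed to the
+            # expat parser; _format_escaped_data restores the line breaks later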
+ xml_str = xmldata.decode('utf-8').replace('\n', '---')
+ xml_parser = xml.parsers.expat.ParserCreate()
+ xml_parser.StartElementHandler = self._start_element
+ xml_parser.CharacterDataHandler = self._char_data
+ xml_parser.EndElementHandler = self._end_element
+
+ try:
+ xml_parser.Parse(xml_str)
+ except xml.parsers.expat.ExpatError as errcode:
+ self.result_dict['status'] = "XML parsing failed: " + str(errcode)
+ self.result_dict['stdout'] = str(xmldata)
+ self.result_dict['result_value'] = -1
+ xml_parse_ok = False
+
+ if xml_parse_ok:
+ self.result_dict['status'] = self.result_dict['xml_dict']['results']['attrs']['status']
+ stdout_string = self._format_escaped_data(self.result_dict['xml_dict']['cli-output']['data'])
+ self.result_dict['stdout'] = stdout_string
+ # Generate stdout_lines list
+ for line in stdout_string.split('\n'):
+ stripped_line = line.strip()
+ if len(stripped_line) > 1:
+ self.result_dict['stdout_lines'].append(stripped_line)
+
+ # Generate stdout_lines_filter_list
+ if self.exclude_lines:
+ if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
+ self.result_dict['stdout_lines_filter'].append(stripped_line)
+ else:
+ if self.include_lines and self.include_lines in stripped_line:
+ self.result_dict['stdout_lines_filter'].append(stripped_line)
+
+ self.result_dict['xml_dict']['cli-output']['data'] = stdout_string
+ cli_result_value = self.result_dict['xml_dict']['cli-result-value']['data']
+ try:
+ # get rid of extra quotes "'1'", but maybe "u'1'" or "b'1'"
+ cli_result_value = ast.literal_eval(cli_result_value)
+ except (SyntaxError, ValueError):
+ pass
+ try:
+ self.result_dict['result_value'] = int(cli_result_value)
+ except ValueError:
+ self.result_dict['result_value'] = cli_result_value
+
+ return self.result_dict
+
+ def _start_element(self, name, attrs):
+ ''' Start XML element '''
+ self.result_dict['xml_dict'][name] = dict()
+ self.result_dict['xml_dict'][name]['attrs'] = attrs
+ self.result_dict['xml_dict'][name]['data'] = ""
+ self.result_dict['xml_dict']['active_element'] = name
+ self.result_dict['xml_dict']['last_element'] = ""
+
+ def _char_data(self, data):
+        ''' Dump XML element data '''
+ self.result_dict['xml_dict'][str(self.result_dict['xml_dict']['active_element'])]['data'] = repr(data)
+
+ def _end_element(self, name):
+ self.result_dict['xml_dict']['last_element'] = name
+ self.result_dict['xml_dict']['active_element'] = ""
+
+ @classmethod
+ def _format_escaped_data(cls, datastring):
+ ''' replace helper escape sequences '''
+ formatted_string = datastring.replace('------', '---').replace('---', '\n').replace("###", " ").strip()
+ retval_string = ""
+ for line in formatted_string.split('\n'):
+ stripped_line = line.strip()
+ if len(stripped_line) > 1:
+ retval_string += stripped_line + "\n"
+ return retval_string
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPCommand()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py
new file mode 100644
index 00000000..9833cc7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_disks
+
+short_description: NetApp ONTAP Assign disks to nodes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Assign all or part of disks to nodes.
+
+options:
+
+ node:
+ required: true
+ type: str
+ description:
+    - Specifies the node that all visible unowned disks will be assigned to.
+
+ disk_count:
+ description:
+ - Total number of disks a node should own
+ type: int
+ version_added: 2.9.0
+
+ disk_type:
+ description:
+    - Assign disks of the specified type (or set of disks). The disk_count parameter is mandatory when this option is used.
+ type: str
+ choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']
+ version_added: '20.6.0'
+
+'''
+
+EXAMPLES = """
+ - name: Assign unowned disks
+ na_ontap_disks:
+ node: cluster-01
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+
+ - name: Assign specified total disks
+   na_ontap_disks:
+     node: cluster-01
+     disk_count: 56
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+
+ - name: Assign disk with disk type
+ na_ontap_disks:
+ node: cluster-01
+ disk_count: 56
+ disk_type: VMDISK
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapDisks(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ node=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ disk_type=dict(required=False, type='str', choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_unassigned_disk_count(self, disk_type=None):
+ """
+ Check for free disks
+ """
+ disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
+ disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
+ disk_raid_info.add_new_child('container-type', 'unassigned')
+ disk_storage_info.add_child_elem(disk_raid_info)
+
+ disk_query = netapp_utils.zapi.NaElement('query')
+ disk_query.add_child_elem(disk_storage_info)
+
+ if disk_type is not None:
+ disk_inventory_info = netapp_utils.zapi.NaElement('storage-inventory-info')
+ disk_inventory_info.add_new_child('disk-type', disk_type)
+
+ disk_iter.add_child_elem(disk_query)
+
+ try:
+ result = self.server.invoke_successfully(disk_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting disk information: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+ return int(result.get_child_content('num-records'))
+
+ def get_owned_disk_count(self, disk_type=None):
+ """
+ Check for owned disks
+ """
+ disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
+ disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
+ disk_ownership_info.add_new_child('home-node-name', self.parameters['node'])
+ disk_storage_info.add_child_elem(disk_ownership_info)
+
+ disk_query = netapp_utils.zapi.NaElement('query')
+ disk_query.add_child_elem(disk_storage_info)
+
+ if disk_type is not None:
+ disk_inventory_info = netapp_utils.zapi.NaElement('storage-inventory-info')
+ disk_inventory_info.add_new_child('disk-type', disk_type)
+
+ disk_iter.add_child_elem(disk_query)
+
+ try:
+ result = self.server.invoke_successfully(disk_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting disk information: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+ return int(result.get_child_content('num-records'))
+
+ def disk_assign(self, needed_disks, disk_type=None):
+ """
+ Set node as disk owner.
+ """
+ if needed_disks > 0:
+ assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'disk-sanown-assign', **{'owner': self.parameters['node'],
+ 'disk-count': str(needed_disks)})
+ else:
+ assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'disk-sanown-assign', **{'node-name': self.parameters['node'],
+ 'all': 'true'})
+ if disk_type is not None:
+ assign_disk.add_new_child('disk-type', disk_type)
+ try:
+ self.server.invoke_successfully(assign_disk,
+ enable_tunneling=True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "13001":
+                # treat this API error as a no-op (nothing left to assign) and report no change
+ return False
+ else:
+ self.module.fail_json(msg='Error assigning disks %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Apply action to disks'''
+ changed = False
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_disks", cserver)
+
+ # check if anything needs to be changed (add/delete/update)
+ unowned_disks = self.get_unassigned_disk_count(disk_type=self.parameters.get('disk_type'))
+ owned_disks = self.get_owned_disk_count(disk_type=self.parameters.get('disk_type'))
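+        # with disk_count, just enough unowned disks are assigned to reach the
+        # requested total; without it, every visible unowned disk is assigned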
+ if 'disk_count' in self.parameters:
+ if self.parameters['disk_count'] < owned_disks:
+                self.module.fail_json(msg="Fewer disks than are currently owned were requested. "
+                                          "This module does not remove disks; "
+                                          "any disk removal must be done manually.")
+ if self.parameters['disk_count'] > owned_disks + unowned_disks:
+ self.module.fail_json(msg="Not enough unowned disks remain to fulfill request")
+ if unowned_disks >= 1:
+ if 'disk_count' in self.parameters:
+ if self.parameters['disk_count'] > owned_disks:
+ needed_disks = self.parameters['disk_count'] - owned_disks
+ if not self.module.check_mode:
+ self.disk_assign(needed_disks, disk_type=self.parameters.get('disk_type'))
+ changed = True
+ else:
+ if not self.module.check_mode:
+ self.disk_assign(0)
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ ''' Create object and call apply '''
+ obj_aggr = NetAppOntapDisks()
+ obj_aggr.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py
new file mode 100644
index 00000000..b3755a2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_dns
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_dns
+short_description: NetApp ONTAP Create, delete, modify DNS servers.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, modify DNS servers.
+- With REST, the module is currently limited to data vservers for delete or modify operations.
+options:
+ state:
+ description:
+ - Whether the DNS servers should be enabled for the given vserver.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ domains:
+ description:
+ - List of DNS domains such as 'sales.bar.com'. The first domain is the one that the Vserver belongs to.
+ type: list
+ elements: str
+
+ nameservers:
+ description:
+ - List of IPv4 addresses of name servers such as '123.123.123.123'.
+ type: list
+ elements: str
+
+ skip_validation:
+ type: bool
+ description:
+ - By default, all nameservers are checked to validate they are available to resolve.
+    - If your DNS servers are not yet installed or are momentarily unavailable, you can set this option to 'true'
+      to bypass the check for all servers specified in the nameservers field.
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+ - name: create DNS
+ na_ontap_dns:
+ state: present
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ vserver: "{{vservername}}"
+ domains: sales.bar.com
+ nameservers: 10.193.0.250,10.192.0.250
+ skip_validation: true
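+
+ # Hypothetical companion example: remove the DNS configuration from the
+ # vserver again, reusing the connection variables from the example above.
+ - name: delete DNS
+   na_ontap_dns:
+     state: absent
+     hostname: "{{hostname}}"
+     username: "{{username}}"
+     password: "{{password}}"
+     vserver: "{{vservername}}"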
+"""
+
+RETURN = """
+
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapDns(object):
+ """
+ Enable and Disable dns
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ domains=dict(required=False, type='list', elements='str'),
+ nameservers=dict(required=False, type='list', elements='str'),
+ skip_validation=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[('state', 'present', ['domains', 'nameservers'])],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Cluster vserver and data vserver use different REST API.
+ self.is_cluster = False
+
+ # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ # some attributes are not supported in earlier REST implementation
+ unsupported_rest_properties = ['skip_validation']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+
+ if error is not None:
+ self.module.fail_json(msg=error)
+
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def create_dns(self):
+ """
+ Create DNS server
+ :return: none
+ """
+ if self.use_rest:
+ if self.is_cluster:
+ api = 'cluster'
+ params = {
+ 'dns_domains': self.parameters['domains'],
+ 'name_servers': self.parameters['nameservers']
+ }
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ api = 'name-services/dns'
+ params = {
+ 'domains': self.parameters['domains'],
+ 'servers': self.parameters['nameservers'],
+ 'svm': {
+ 'name': self.parameters['vserver']
+ }
+ }
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ dns = netapp_utils.zapi.NaElement('net-dns-create')
+ nameservers = netapp_utils.zapi.NaElement('name-servers')
+ domains = netapp_utils.zapi.NaElement('domains')
+ for each in self.parameters['nameservers']:
+ ip_address = netapp_utils.zapi.NaElement('ip-address')
+ ip_address.set_content(each)
+ nameservers.add_child_elem(ip_address)
+ dns.add_child_elem(nameservers)
+ for each in self.parameters['domains']:
+ domain = netapp_utils.zapi.NaElement('string')
+ domain.set_content(each)
+ domains.add_child_elem(domain)
+ dns.add_child_elem(domains)
+ if self.parameters.get('skip_validation'):
+ validation = netapp_utils.zapi.NaElement('skip-config-validation')
+ validation.set_content(str(self.parameters['skip_validation']))
+ dns.add_child_elem(validation)
+ try:
+ self.server.invoke_successfully(dns, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating dns: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def destroy_dns(self, dns_attrs):
+ """
+ Destroys an already created dns
+ :return:
+ """
+ if self.use_rest:
+ if self.is_cluster:
+ error = 'cluster operation for deleting DNS is not supported with REST.'
+ self.module.fail_json(msg=error)
+ api = 'name-services/dns/' + dns_attrs['uuid']
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('net-dns-destroy'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error destroying dns %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_cluster(self):
+ api = "cluster"
+ message, error = self.rest_api.get(api, None)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ self.module.fail_json(msg="no data from cluster %s" % str(message))
+ return message
+
+ def get_cluster_dns(self):
+ cluster_attrs = self.get_cluster()
+ dns_attrs = None
+ if self.parameters['vserver'] == cluster_attrs['name']:
+ dns_attrs = {
+ 'domains': cluster_attrs.get('dns_domains'),
+ 'nameservers': cluster_attrs.get('name_servers'),
+ 'uuid': cluster_attrs['uuid'],
+ }
+ self.is_cluster = True
+ if dns_attrs['domains'] is None and dns_attrs['nameservers'] is None:
+ dns_attrs = None
+ return dns_attrs
+
+ def get_dns(self):
+ if self.use_rest:
+ api = "name-services/dns"
+ params = {'fields': 'domains,servers,svm',
+ "svm.name": self.parameters['vserver']}
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ message = None
+ elif 'records' in message and len(message['records']) == 0:
+ message = None
+ elif 'records' not in message or len(message['records']) != 1:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ if message is not None:
+ record = message['records'][0]
+ attrs = {
+ 'domains': record['domains'],
+ 'nameservers': record['servers'],
+ 'uuid': record['svm']['uuid']
+ }
+ return attrs
+ return None
+ else:
+ dns_obj = netapp_utils.zapi.NaElement('net-dns-get')
+ try:
+ result = self.server.invoke_successfully(dns_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "15661":
+ # 15661 is object not found
+ return None
+ else:
+ self.module.fail_json(msg=to_native(
+ error), exception=traceback.format_exc())
+
+ # read data for modify
+ attrs = dict()
+ attributes = result.get_child_by_name('attributes')
+ dns_info = attributes.get_child_by_name('net-dns-info')
+ nameservers = dns_info.get_child_by_name('name-servers')
+ attrs['nameservers'] = [each.get_content() for each in nameservers.get_children()]
+ domains = dns_info.get_child_by_name('domains')
+ attrs['domains'] = [each.get_content() for each in domains.get_children()]
+ attrs['skip_validation'] = dns_info.get_child_by_name('skip-config-validation')
+ return attrs
+
+ def modify_dns(self, dns_attrs):
+ if self.use_rest:
+ changed = False
+ params = {}
+ if dns_attrs['nameservers'] != self.parameters['nameservers']:
+ changed = True
+ params['servers'] = self.parameters['nameservers']
+ if dns_attrs['domains'] != self.parameters['domains']:
+ changed = True
+ params['domains'] = self.parameters['domains']
+ if changed and not self.module.check_mode:
+ uuid = dns_attrs['uuid']
+ api = "name-services/dns/" + uuid
+ if self.is_cluster:
+ api = 'cluster'
+ params = {
+ 'dns_domains': self.parameters['domains'],
+ 'name_servers': self.parameters['nameservers']
+ }
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ else:
+ changed = False
+ dns = netapp_utils.zapi.NaElement('net-dns-modify')
+ if dns_attrs['nameservers'] != self.parameters['nameservers']:
+ changed = True
+ nameservers = netapp_utils.zapi.NaElement('name-servers')
+ for each in self.parameters['nameservers']:
+ ip_address = netapp_utils.zapi.NaElement('ip-address')
+ ip_address.set_content(each)
+ nameservers.add_child_elem(ip_address)
+ dns.add_child_elem(nameservers)
+ if dns_attrs['domains'] != self.parameters['domains']:
+ changed = True
+ domains = netapp_utils.zapi.NaElement('domains')
+ for each in self.parameters['domains']:
+ domain = netapp_utils.zapi.NaElement('string')
+ domain.set_content(each)
+ domains.add_child_elem(domain)
+ dns.add_child_elem(domains)
+ if changed and not self.module.check_mode:
+ if self.parameters.get('skip_validation'):
+ validation = netapp_utils.zapi.NaElement('skip-config-validation')
+ validation.set_content(str(self.parameters['skip_validation']))
+ dns.add_child_elem(validation)
+ try:
+ self.server.invoke_successfully(dns, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying dns %s' %
+ (to_native(error)), exception=traceback.format_exc())
+ return changed
+
+ def apply(self):
+ # asup logging
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_dns", self.server)
+ dns_attrs = self.get_dns()
+ if self.use_rest and dns_attrs is None:
+ # There is a chance we are working at the cluster level
+ dns_attrs = self.get_cluster_dns()
+ changed = False
+ if self.parameters['state'] == 'present':
+ if dns_attrs is not None:
+ changed = self.modify_dns(dns_attrs)
+ else:
+ if not self.module.check_mode:
+ self.create_dns()
+ changed = True
+ else:
+ if dns_attrs is not None:
+ if not self.module.check_mode:
+ self.destroy_dns(dns_attrs)
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Create, Delete, Modify DNS servers.
+ """
+ obj = NetAppOntapDns()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py
new file mode 100644
index 00000000..def8c76e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_efficiency_policy
+short_description: NetApp ONTAP manage efficiency policies (sis policies)
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete efficiency policies (sis policies)
+options:
+ state:
+ description:
+ - Whether the specified efficiency policy should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ policy_name:
+ description:
+ - the name of the efficiency policy
+ required: true
+ type: str
+
+ comment:
+ description:
+ - A brief description of the policy.
+ type: str
+
+ duration:
+ description:
+ - The duration in hours for which the scheduled efficiency operation should run.
+ After this time expires, the efficiency operation will be stopped even if the operation is incomplete.
+ If '-' is specified as the duration, the efficiency operation will run till it completes. Otherwise, the duration has to be an integer greater than 0.
+ By default, the operation runs till it completes.
+ type: str
+
+ enabled:
+ description:
+ - If the value is true, the efficiency policy is active in this cluster.
+ If the value is false this policy will not be activated by the schedulers and hence will be inactive.
+ type: bool
+
+ policy_type:
+ description:
+ - The policy type reflects the reason a volume using this policy will start processing a changelog.
+ - (Changelog processing is identifying and eliminating duplicate blocks which were written since the changelog was last processed.)
+ - threshold Changelog processing occurs once the changelog reaches a certain percent full.
+ - scheduled Changelog processing will be triggered by time.
+ choices: ['threshold', 'scheduled']
+ type: str
+
+ qos_policy:
+ description:
+ - QoS policy for the efficiency operation.
+ - background efficiency operation will run in background with minimal or no impact on data serving client operations,
+ - best-effort efficiency operations may have some impact on data serving client operations.
+ choices: ['background', 'best_effort']
+ type: str
+
+ schedule:
+ description:
+ - Cron type job schedule name. When the associated policy is set on a volume, the efficiency operation will be triggered for the volume on this schedule.
+ - These schedules can be created using the na_ontap_job_schedule module
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ changelog_threshold_percent:
+ description:
+ - Specifies the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+ type: int
+ version_added: '19.11.0'
+'''
+
+EXAMPLES = """
+ - name: Create threshold efficiency policy
+ na_ontap_efficiency_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ vserver: ansible
+ state: present
+ policy_name: test
+ comment: This policy is for x and y
+ enabled: true
+ policy_type: threshold
+ qos_policy: background
+ changelog_threshold_percent: 20
+
+ - name: Create efficiency Scheduled efficiency Policy
+ na_ontap_efficiency_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ vserver: ansible
+ state: present
+ policy_name: test2
+ comment: This policy is for x and y
+ enabled: true
+ schedule: new_job_schedule
+ duration: 1
+ policy_type: scheduled
+ qos_policy: background
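+
+ # Hypothetical companion example: delete one of the policies created above;
+ # only state, policy_name and vserver are needed besides the connection details.
+ - name: Delete efficiency policy
+   na_ontap_efficiency_policy:
+     hostname: "{{ hostname }}"
+     username: "{{ username }}"
+     password: "{{ password }}"
+     vserver: ansible
+     state: absent
+     policy_name: test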
+"""
+
+RETURN = """
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapEfficiencyPolicy(object):
+ """
+ Create, delete and modify efficiency policy
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ policy_name=dict(required=True, type='str'),
+ comment=dict(required=False, type='str'),
+ duration=dict(required=False, type='str'),
+ enabled=dict(required=False, type='bool'),
+ policy_type=dict(required=False, choices=['threshold', 'scheduled']),
+ qos_policy=dict(required=False, choices=['background', 'best_effort']),
+ schedule=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ changelog_threshold_percent=dict(required=False, type='int')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[('changelog_threshold_percent', 'duration'), ('changelog_threshold_percent', 'schedule')]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+ if self.parameters.get('policy_type'):
+ if self.parameters['policy_type'] == 'threshold':
+ if self.parameters.get('duration'):
+ self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
+ if self.parameters.get('schedule'):
+ self.module.fail_json(msg='schedule cannot be set if policy_type is threshold')
+ # if policy_type is 'scheduled'
+ else:
+ if self.parameters.get('changelog_threshold_percent'):
+ self.module.fail_json(msg='changelog_threshold_percent cannot be set if policy_type is scheduled')
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+
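+        # map playbook option names to ZAPI element names, grouped by value
+        # type so get and create can convert and copy the values generically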
+ self.na_helper.zapi_int_keys = {
+ 'changelog_threshold_percent': 'changelog-threshold-percent'
+ }
+ self.na_helper.zapi_str_keys = {
+ 'policy_name': 'policy-name',
+ 'comment': 'comment',
+ 'policy_type': 'policy-type',
+ 'qos_policy': 'qos-policy',
+ 'schedule': 'schedule',
+ 'duration': 'duration'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'enabled': 'enabled'
+ }
+
+ def get_efficiency_policy(self):
+ """
+        Get an efficiency policy
+        :return: efficiency-policy info dict, or None if the policy does not exist
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-get-iter")
+ query = netapp_utils.zapi.NaElement("query")
+ sis_policy_info = netapp_utils.zapi.NaElement("sis-policy-info")
+ sis_policy_info.add_new_child("policy-name", self.parameters['policy_name'])
+ sis_policy_info.add_new_child("vserver", self.parameters['vserver'])
+ query.add_child_elem(sis_policy_info)
+ sis_policy_obj.add_child_elem(query)
+ try:
+ results = self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error searching for efficiency policy %s: %s" % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return_value = {}
+ if results.get_child_by_name('num-records') and int(results.get_child_content('num-records')) == 1:
+ attributes_list = results.get_child_by_name('attributes-list')
+ sis_info = attributes_list.get_child_by_name('sis-policy-info')
+ for option, zapi_key in self.na_helper.zapi_int_keys.items():
+ return_value[option] = self.na_helper.get_value_for_int(from_zapi=True, value=sis_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+ return_value[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=sis_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_str_keys.items():
+ return_value[option] = sis_info.get_child_content(zapi_key)
+ return return_value
+ return None
+
+ def create_efficiency_policy(self):
+ """
+        Creates an efficiency policy
+ :return: None
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-create")
+ for option, zapi_key in self.na_helper.zapi_int_keys.items():
+ if self.parameters.get(option):
+ sis_policy_obj.add_new_child(zapi_key,
+ self.na_helper.get_value_for_int(from_zapi=False,
+ value=self.parameters[option]))
+ for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+ if self.parameters.get(option):
+ sis_policy_obj.add_new_child(zapi_key,
+ self.na_helper.get_value_for_bool(from_zapi=False,
+ value=self.parameters[option]))
+ for option, zapi_key in self.na_helper.zapi_str_keys.items():
+ if self.parameters.get(option):
+ sis_policy_obj.add_new_child(zapi_key, str(self.parameters[option]))
+ try:
+ self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_efficiency_policy(self):
+ """
+        Delete an efficiency policy
+ :return: None
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-delete")
+ sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error deleting efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_efficiency_policy(self, current, modify):
+ """
+        Modify an efficiency policy
+ :return: None
+ """
+ sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-modify")
+ sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+ # sis-policy-create zapi pre-checks the options and fails if it's not supported.
+        # sis-policy-modify pre-checks one of the options, but tries to modify the others even if they are not supported, and that will mess up the vsim.
+ # Do the checks before sending to the zapi.
+ if current['policy_type'] == 'scheduled' and self.parameters.get('policy_type') != 'threshold':
+ if modify.get('changelog_threshold_percent'):
+ self.module.fail_json(msg="changelog_threshold_percent cannot be set if policy_type is scheduled")
+ elif current['policy_type'] == 'threshold' and self.parameters.get('policy_type') != 'scheduled':
+ if modify.get('duration'):
+ self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
+ elif modify.get('schedule'):
+ self.module.fail_json(msg="schedule cannot be set if policy_type is threshold")
+ for attribute in modify:
+ sis_policy_obj.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
+ try:
+ self.server.invoke_successfully(sis_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modifying efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
+ return str.replace(attribute, '_', '-')
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_efficiency_policy", self.server)
+ current = self.get_efficiency_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_efficiency_policy()
+ elif cd_action == 'delete':
+ self.delete_efficiency_policy()
+ elif modify:
+ self.modify_efficiency_policy(current, modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapEfficiencyPolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py
new file mode 100644
index 00000000..39c01e08
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_export_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_export_policy
+short_description: NetApp ONTAP manage export-policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or rename export-policies on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified export policy should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the export-policy to manage.
+ type: str
+ required: true
+ from_name:
+ description:
+ - The name of the export-policy to be renamed.
+ type: str
+ version_added: 2.7.0
+ vserver:
+ required: true
+ type: str
+ description:
+ - Name of the vserver to use.
+'''
+
+EXAMPLES = """
+ - name: Create Export Policy
+ na_ontap_export_policy:
+ state: present
+ name: ansiblePolicyName
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Rename Export Policy
+ na_ontap_export_policy:
+        state: present
+ from_name: ansiblePolicyName
+ vserver: vs_hack
+ name: newPolicyName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete Export Policy
+ na_ontap_export_policy:
+ state: absent
+ name: ansiblePolicyName
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPExportPolicy(object):
+ """
+ Class with export policy methods
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str', default=None),
+ vserver=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_export_policy(self, name=None, uuid=None):
+ """
+ Return details about the export-policy
+ :param:
+ name : Name of the export-policy
+ :return: Details about the export-policy. None if not found.
+ :rtype: dict
+ """
+ if name is None:
+ name = self.parameters['name']
+ if self.use_rest:
+ params = {'fields': 'name',
+ 'name': name,
+ 'svm.uuid': uuid}
+ api = 'protocols/nfs/export-policies/'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching export policy: %s" % error)
+ if message['num_records'] > 0:
+ return {'policy-name': message['records'][0]['name']}
+ else:
+ return None
+
+ else:
+ export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter')
+ export_policy_info = netapp_utils.zapi.NaElement('export-policy-info')
+ export_policy_info.add_new_child('policy-name', name)
+ export_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(export_policy_info)
+ export_policy_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(export_policy_iter, True)
+ return_value = None
+ # check if query returns the expected export-policy
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+
+ export_policy = result.get_child_by_name('attributes-list').get_child_by_name('export-policy-info').get_child_by_name('policy-name')
+ return_value = {
+ 'policy-name': export_policy
+ }
+ return return_value
+
+ def create_export_policy(self, uuid=None):
+ """
+ Creates an export policy
+ """
+ if self.use_rest:
+ params = {'name': self.parameters['name'],
+ 'svm.uuid': uuid}
+ api = 'protocols/nfs/export-policies'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating export policy: %s" % error)
+ else:
+ export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-create', **{'policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on creating export-policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_export_policy(self, policy_id=None):
+ """
+ Delete export-policy
+ """
+ if self.use_rest:
+ api = 'protocols/nfs/export-policies/' + str(policy_id)
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+                self.module.fail_json(msg="Error on deleting export policy: %s" % error)
+ else:
+ export_policy_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-destroy', **{'policy-name': self.parameters['name'], })
+ try:
+ self.server.invoke_successfully(export_policy_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on deleting export-policy %s: %s'
+ % (self.parameters['name'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def rename_export_policy(self, policy_id=None):
+ """
+ Rename the export-policy.
+ """
+ if self.use_rest:
+ params = {'name': self.parameters['name']}
+ api = 'protocols/nfs/export-policies/' + str(policy_id)
+ dummy, error = self.rest_api.patch(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on renaming export policy: %s" % error)
+ else:
+ export_policy_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-rename', **{'policy-name': self.parameters['from_name'],
+ 'new-policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error on renaming export-policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_export_policy_id(self, name=None):
+ """
+        Get an export policy's id
+ :return: id of the export policy
+ """
+ if name is None:
+ name = self.parameters['name']
+
+ params = {'fields': 'id',
+ 'svm.name': self.parameters['vserver'],
+ 'name': name
+ }
+ api = 'protocols/nfs/export-policies'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ if message['num_records'] == 0:
+ return None
+ else:
+ return message['records'][0]['id']
+
+ def get_export_policy_svm_uuid(self):
+ """
+        Get an SVM's uuid
+ :return: uuid of the svm
+ """
+ params = {'svm.name': self.parameters['vserver']}
+ api = 'protocols/nfs/export-policies'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ return message['records'][0]['svm']['uuid']
+
+ def apply(self):
+ """
+ Apply action to export-policy
+ """
+ policy_id, uuid = None, None
+ cd_action, rename = None, None
+
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_export_policy", self.server)
+ if self.use_rest:
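+            # REST addresses the export policy by its id and the owning vserver by its svm uuid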
+ uuid = self.get_export_policy_svm_uuid()
+ if self.parameters.get('from_name'):
+ policy_id = self.get_export_policy_id(self.parameters['from_name'])
+ else:
+ policy_id = self.get_export_policy_id()
+
+ current = self.get_export_policy(uuid=uuid)
+
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_export_policy(self.parameters['from_name']), current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming: export policy %s does not exist" % self.parameters['from_name'])
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_export_policy(policy_id=policy_id)
+ elif cd_action == 'create':
+ self.create_export_policy(uuid=uuid)
+ elif cd_action == 'delete':
+ self.delete_export_policy(policy_id=policy_id)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action
+ """
+ export_policy = NetAppONTAPExportPolicy()
+ export_policy.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py
new file mode 100644
index 00000000..7b0b0acc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+
+# (c) 2018-2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_export_policy_rule
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_export_policy_rule
+
+short_description: NetApp ONTAP manage export policy rules
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or modify export rules in ONTAP
+
+options:
+ state:
+ description:
+ - Whether the specified export policy rule should exist or not.
+ required: false
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the export policy this rule will be added to (or modified, or removed from).
+ required: True
+ type: str
+ aliases:
+ - policy_name
+
+ client_match:
+ description:
+ - List of Client Match host names, IP Addresses, Netgroups, or Domains
+    - If rule_index is not provided, client_match is used as a key to fetch the current rule and determine the create, delete, or modify action.
+      If a rule with the provided client_match exists, a new rule will not be created; the existing rule will be modified or deleted.
+      If a rule with the provided client_match does not exist, a new rule will be created if state is present.
+ type: list
+ elements: str
+
+ anonymous_user_id:
+ description:
+ - User name or ID to which anonymous users are mapped. Default value is '65534'.
+ type: int
+
+ ro_rule:
+ description:
+ - List of Read only access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ rw_rule:
+ description:
+ - List of Read Write access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ super_user_security:
+ description:
+    - List of Super User access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ allow_suid:
+ description:
+ - If 'true', NFS server will honor SetUID bits in SETATTR operation. Default value on creation is 'true'
+ type: bool
+
+ protocol:
+ description:
+ - List of Client access protocols.
+ - Default value is set to 'any' during create.
+ choices: [any,nfs,nfs3,nfs4,cifs,flexcache]
+ type: list
+ elements: str
+
+ rule_index:
+ description:
+ - index of the export policy rule
+ type: int
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Create ExportPolicyRule
+ na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ vserver: ci_dev
+ client_match: 0.0.0.0/0,1.1.1.0/24
+ ro_rule: krb5,krb5i
+ rw_rule: any
+ protocol: nfs,nfs3
+ super_user_security: any
+ anonymous_user_id: 65534
+ allow_suid: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify ExportPolicyRule
+ na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ rule_index: 100
+ client_match: 0.0.0.0/0
+ anonymous_user_id: 65521
+ ro_rule: ntlm
+ rw_rule: any
+ protocol: any
+ allow_suid: false
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete ExportPolicyRule
+ na_ontap_export_policy_rule:
+ state: absent
+ name: default123
+ rule_index: 100
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppontapExportRule(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', aliases=['policy_name']),
+ protocol=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'nfs', 'nfs3', 'nfs4', 'cifs', 'flexcache']),
+ client_match=dict(required=False, type='list', elements='str'),
+ ro_rule=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+ rw_rule=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+ super_user_security=dict(required=False,
+ type='list', elements='str', default=None,
+ choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+ allow_suid=dict(required=False, type='bool'),
+ rule_index=dict(required=False, type='int'),
+ anonymous_user_id=dict(required=False, type='int'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+ self.na_helper.zapi_string_keys = {
+ 'client_match': 'client-match',
+ 'name': 'policy-name'
+ }
+ self.na_helper.zapi_list_keys = {
+ 'protocol': ('protocol', 'access-protocol'),
+ 'ro_rule': ('ro-rule', 'security-flavor'),
+ 'rw_rule': ('rw-rule', 'security-flavor'),
+ 'super_user_security': ('super-user-security', 'security-flavor'),
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'allow_suid': 'is-allow-set-uid-enabled'
+ }
+ self.na_helper.zapi_int_keys = {
+ 'rule_index': 'rule-index',
+ 'anonymous_user_id': 'anonymous-user-id'
+
+ }
+
+ def set_query_parameters(self):
+ """
+        Build the query used to look up an export policy rule, keyed by rule_index or client_match.
+        :return: dict of query attributes
+ """
+ query = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+
+ if self.parameters.get('rule_index'):
+ query['rule-index'] = self.parameters['rule_index']
+ elif self.parameters.get('client_match'):
+ query['client-match'] = self.parameters['client_match']
+ else:
+ self.module.fail_json(
+                msg="Need to specify at least one of the rule_index or client_match options.")
+
+ attributes = {
+ 'query': {
+ 'export-rule-info': query
+ }
+ }
+ return attributes
+
+ def get_export_policy_rule(self):
+ """
+ Return details about the export policy rule
+ :param:
+ name : Name of the export_policy
+ :return: Details about the export_policy. None if not found.
+ :rtype: dict
+ """
+ current, result = None, None
+ rule_iter = netapp_utils.zapi.NaElement('export-rule-get-iter')
+ rule_iter.translate_struct(self.set_query_parameters())
+ try:
+ result = self.server.invoke_successfully(rule_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting export policy rule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result is not None and \
+ result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ current = dict()
+ rule_info = result.get_child_by_name('attributes-list').get_child_by_name('export-rule-info')
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ current[item_key] = rule_info.get_child_content(zapi_key)
+ for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+ current[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+ value=rule_info[zapi_key])
+ for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+ current[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=rule_info[zapi_key])
+ for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
+ parent, dummy = zapi_key
+ current[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
+ zapi_parent=rule_info.get_child_by_name(parent))
+ current['num_records'] = int(result.get_child_content('num-records'))
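+            # remember the index of the matched rule so a later modify or delete can address it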
+ if not self.parameters.get('rule_index'):
+ self.parameters['rule_index'] = current['rule_index']
+ return current
+
+ def get_export_policy(self):
+ """
+ Return details about the export-policy
+ :param:
+ name : Name of the export-policy
+
+ :return: Details about the export-policy. None if not found.
+ :rtype: dict
+ """
+ export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter')
+ attributes = {
+ 'query': {
+ 'export-policy-info': {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+ }
+ }
+
+ export_policy_iter.translate_struct(attributes)
+ try:
+ result = self.server.invoke_successfully(export_policy_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting export policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ return result
+
+ return None
+
+ def add_parameters_for_create_or_modify(self, na_element_object, values):
+ """
+ Add children node for create or modify NaElement object
+ :param na_element_object: modify or create NaElement object
+ :param values: dictionary of cron values to be added
+ :return: None
+ """
+ for key in values:
+ if key in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(key)
+ na_element_object[zapi_key] = values[key]
+ elif key in self.na_helper.zapi_list_keys:
+ parent_key, child_key = self.na_helper.zapi_list_keys.get(key)
+ na_element_object.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent=parent_key,
+ zapi_child=child_key,
+ data=values[key]))
+ elif key in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(key)
+ na_element_object[zapi_key] = self.na_helper.get_value_for_int(from_zapi=False,
+ value=values[key])
+ elif key in self.na_helper.zapi_bool_keys:
+ zapi_key = self.na_helper.zapi_bool_keys.get(key)
+ na_element_object[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+ value=values[key])
+
+ def create_export_policy_rule(self):
+ """
+ create rule for the export policy.
+ """
+ for key in ['client_match', 'ro_rule', 'rw_rule']:
+ if self.parameters.get(key) is None:
+ self.module.fail_json(msg='Error: Missing required param for creating export policy rule %s' % key)
+ export_rule_create = netapp_utils.zapi.NaElement('export-rule-create')
+ self.add_parameters_for_create_or_modify(export_rule_create, self.parameters)
+ try:
+ self.server.invoke_successfully(export_rule_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating export policy rule %s: %s'
+ % (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+ def create_export_policy(self):
+ """
+ Creates an export policy
+ """
+ export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-create', **{'policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating export-policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_export_policy_rule(self, rule_index):
+ """
+ delete rule for the export policy.
+ """
+ export_rule_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-rule-destroy', **{'policy-name': self.parameters['name'],
+ 'rule-index': str(rule_index)})
+
+ try:
+ self.server.invoke_successfully(export_rule_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting export policy rule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_export_policy_rule(self, params):
+ '''
+ Modify an existing export policy rule
+ :param params: dict() of attributes with desired values
+ :return: None
+ '''
+ export_rule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-rule-modify', **{'policy-name': self.parameters['name'],
+ 'rule-index': str(self.parameters['rule_index'])})
+ self.add_parameters_for_create_or_modify(export_rule_modify, params)
+ try:
+ self.server.invoke_successfully(export_rule_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying export policy rule %s: %s'
+                                      % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ netapp_utils.ems_log_event("na_ontap_export_policy_rules", self.server)
+
+ def apply(self):
+ ''' Apply required action from the play'''
+ self.autosupport_log()
+ # convert client_match list to comma-separated string
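+        # e.g. ['0.0.0.0/0', ' 1.1.1.0/24'] -> '0.0.0.0/0,1.1.1.0/24'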
+ if self.parameters.get('client_match') is not None:
+ self.parameters['client_match'] = ','.join(self.parameters['client_match'])
+ self.parameters['client_match'] = self.parameters['client_match'].replace(' ', '')
+
+ current, modify = self.get_export_policy_rule(), None
+ action = self.na_helper.get_cd_action(current, self.parameters)
+ if action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ # create export policy (if policy doesn't exist) only when changed=True
+ if action == 'create':
+ if not self.get_export_policy():
+ self.create_export_policy()
+ self.create_export_policy_rule()
+ elif action == 'delete':
+ if current['num_records'] > 1:
+                        self.module.fail_json(msg='Multiple export policy rules exist. '
+                                                  'Please specify a rule_index to delete.')
+ self.delete_export_policy_rule(current['rule_index'])
+ elif modify:
+ self.modify_export_policy_rule(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ ''' Create object and call apply '''
+ rule_obj = NetAppontapExportRule()
+ rule_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py
new file mode 100644
index 00000000..d0edd545
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_fcp
+short_description: NetApp ONTAP Start, Stop and Enable FCP services.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Start, Stop and Enable FCP services.
+options:
+ state:
+ description:
+ - Whether the FCP should be enabled or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ status:
+ description:
+ - Whether the FCP should be up or down
+ choices: ['up', 'down']
+ type: str
+ default: up
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: create FCP
+ na_ontap_fcp:
+ state: present
+ status: down
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ vserver: "{{vservername}}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapFCP(object):
+ """
+ Enable and Disable FCP
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ status=dict(required=False, type='str', choices=['up', 'down'], default='up')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def create_fcp(self):
+ """
+        Creates and starts the FCP service
+ :return: none
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-create'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating FCP: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def start_fcp(self):
+ """
+        Starts an existing FCP service
+ :return: none
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-start'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 13013 denotes fcp service already started.
+ if to_native(error.code) == "13013":
+ return None
+ else:
+ self.module.fail_json(msg='Error starting FCP %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def stop_fcp(self):
+ """
+        Stops an existing FCP service
+ :return: none
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-stop'), True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error stopping FCP: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def destroy_fcp(self):
+ """
+ Destroys an already stopped FCP
+ :return:
+ """
+ try:
+ self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-destroy'), True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error destroying FCP %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_fcp(self):
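+        """Return True if an FCP service exists for the vserver, False otherwise."""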
+ fcp_obj = netapp_utils.zapi.NaElement('fcp-service-get-iter')
+ fcp_info = netapp_utils.zapi.NaElement('fcp-service-info')
+ fcp_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(fcp_info)
+ fcp_obj.add_child_elem(query)
+ result = self.server.invoke_successfully(fcp_obj, True)
+        # There can only be one FCP service per vserver; one or more matching records means it is set up.
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+ return True
+ else:
+ return False
+
+ def current_status(self):
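+        """Return True if the FCP service is up (is-available), False if it is down."""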
+ try:
+ status = self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-status'), True)
+ return status.get_child_content('is-available') == 'true'
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error getting FCP status: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_fcp", cserver)
+ exists = self.get_fcp()
+ changed = False
+ if self.parameters['state'] == 'present':
+ if exists:
+ if self.parameters['status'] == 'up':
+ if not self.current_status():
+ if not self.module.check_mode:
+ self.start_fcp()
+ changed = True
+ else:
+ if self.current_status():
+ if not self.module.check_mode:
+ self.stop_fcp()
+ changed = True
+ else:
+ if not self.module.check_mode:
+ self.create_fcp()
+ if self.parameters['status'] == 'up':
+ self.start_fcp()
+ elif self.parameters['status'] == 'down':
+ self.stop_fcp()
+ changed = True
+ else:
+ if exists:
+ if not self.module.check_mode:
+ if self.current_status():
+ self.stop_fcp()
+ self.destroy_fcp()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Start, Stop and Enable FCP services.
+ """
+ obj = NetAppOntapFCP()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py
new file mode 100644
index 00000000..e56ceeef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+
+module: na_ontap_file_directory_policy
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete, or modify vserver security file-directory policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.8.0
+description:
+ - Create, modify, or destroy vserver security file-directory policy
+  - Add or remove a task from a policy.
+  - Each time a policy or task is created or modified, the policy is automatically applied to the vserver.
+
+options:
+ state:
+ description:
+ - Whether the specified policy or task should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the policy.
+ required: true
+ type: str
+
+ policy_name:
+ description:
+ - Specifies the name of the policy.
+ type: str
+ required: true
+
+ access_control:
+ description:
+ - Specifies the access control of task to be applied.
+ choices: ['file_directory', 'slag']
+ type: str
+
+ ntfs_mode:
+ description:
+ - Specifies NTFS Propagation Mode.
+ choices: ['propagate', 'ignore', 'replace']
+ type: str
+
+ ntfs_sd:
+ description:
+ - Specifies NTFS security descriptor identifier.
+ type: list
+ elements: str
+
+ path:
+ description:
+ - Specifies the file or folder path of the task.
+      - If path is specified and the policy the task is being added to does not exist, the policy is created first and the task is then added to it.
+      - If path is specified, the delete operation only removes the task from the policy.
+ type: str
+
+ security_type:
+ description:
+ - Specifies the type of security.
+ type: str
+ choices: ['ntfs', 'nfsv4']
+
+ ignore_broken_symlinks:
+ description:
+      - Skip broken symlinks.
+      - Option used when applying the policy to the vserver.
+ type: bool
+
+"""
+
+EXAMPLES = """
+
+ - name: create policy
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ vserver: ansible
+ policy_name: file_policy
+ ignore_broken_symlinks: false
+
+ - name: add task to existing file_policy
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ vserver: ansible
+ policy_name: file_policy
+ path: /vol
+ ntfs_sd: ansible_sd
+ ntfs_mode: propagate
+
+ - name: delete task from file_policy.
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ vserver: ansible
+ policy_name: file_policy
+ path: /vol
+
+ - name: delete file_policy along with the tasks.
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ vserver: ansible
+ policy_name: file_policy
+
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapFilePolicy(object):
+
+ def __init__(self):
+ """
+ Initialize the Ontap file directory policy class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
+ ntfs_mode=dict(required=False, choices=['propagate', 'ignore', 'replace']),
+ ntfs_sd=dict(required=False, type='list', elements='str'),
+ path=dict(required=False, type='str'),
+ security_type=dict(required=False, type='str', choices=['ntfs', 'nfsv4']),
+ ignore_broken_symlinks=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def create_policy(self):
+ policy_obj = netapp_utils.zapi.NaElement("file-directory-security-policy-create")
+ policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error creating file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_policy_iter(self):
+ policy_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-get-iter')
+ policy_info = netapp_utils.zapi.NaElement('file-directory-security-policy')
+ policy_info.add_new_child('vserver', self.parameters['vserver'])
+ policy_info.add_new_child('policy-name', self.parameters['policy_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(policy_info)
+ policy_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(policy_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ policy = attributes_list.get_child_by_name('file-directory-security-policy')
+ return policy.get_child_content('policy-name')
+ return None
+
+ def remove_policy(self):
+ remove_policy = netapp_utils.zapi.NaElement('file-directory-security-policy-delete')
+ remove_policy.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(remove_policy, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error removing file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_task_iter(self):
+ task_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-task-get-iter')
+ task_info = netapp_utils.zapi.NaElement('file-directory-security-policy-task')
+ task_info.add_new_child('vserver', self.parameters['vserver'])
+ task_info.add_new_child('policy-name', self.parameters['policy_name'])
+ task_info.add_new_child('path', self.parameters['path'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(task_info)
+ task_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(task_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching task from file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ task = attributes_list.get_child_by_name('file-directory-security-policy-task')
+ task_result = dict()
+ task_result['path'] = task.get_child_content('path')
+ if task.get_child_by_name('ntfs-mode'):
+ task_result['ntfs_mode'] = task.get_child_content('ntfs-mode')
+ if task.get_child_by_name('security-type'):
+ task_result['security_type'] = task.get_child_content('security-type')
+ if task.get_child_by_name('ntfs-sd'):
+ task_result['ntfs_sd'] = [ntfs_sd.get_content() for ntfs_sd in task.get_child_by_name('ntfs-sd').get_children()]
+ return task_result
+ return None
+
+ def add_task_to_policy(self):
+ policy_add_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-add')
+ policy_add_task.add_new_child('path', self.parameters['path'])
+ policy_add_task.add_new_child('policy-name', self.parameters['policy_name'])
+ if self.parameters.get('access_control') is not None:
+ policy_add_task.add_new_child('access-control', self.parameters['access_control'])
+ if self.parameters.get('ntfs_mode') is not None:
+ policy_add_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
+ if self.parameters.get('ntfs_sd') is not None:
+ ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
+ for ntfs_sd in self.parameters['ntfs_sd']:
+ ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
+ policy_add_task.add_child_elem(ntfs_sds)
+ if self.parameters.get('security_type') is not None:
+ policy_add_task.add_new_child('security-type', self.parameters['security_type'])
+ try:
+ self.server.invoke_successfully(policy_add_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding task to file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_task_from_policy(self):
+ policy_remove_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-remove')
+ policy_remove_task.add_new_child('path', self.parameters['path'])
+ policy_remove_task.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(policy_remove_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing task from file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_task(self, modify):
+ policy_modify_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-modify')
+ policy_modify_task.add_new_child('path', self.parameters['path'])
+ policy_modify_task.add_new_child('policy-name', self.parameters['policy_name'])
+ if modify.get('ntfs_mode') is not None:
+ policy_modify_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
+ if modify.get('ntfs_sd') is not None:
+ ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
+ for ntfs_sd in self.parameters['ntfs_sd']:
+ ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
+ policy_modify_task.add_child_elem(ntfs_sds)
+ if modify.get('security_type') is not None:
+ policy_modify_task.add_new_child('security-type', self.parameters['security_type'])
+ try:
+ self.server.invoke_successfully(policy_modify_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying task in file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def set_sd(self):
+ set_sd = netapp_utils.zapi.NaElement('file-directory-security-set')
+ set_sd.add_new_child('policy-name', self.parameters['policy_name'])
+        if self.parameters.get('ignore_broken_symlinks') is not None:
+ set_sd.add_new_child('ignore-broken-symlinks', str(self.parameters['ignore_broken_symlinks']))
+ try:
+ self.server.invoke_successfully(set_sd, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error applying file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_file_directory_policy", self.server)
+ current = self.get_policy_iter()
+ cd_action, task_cd_action, task_modify = None, None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters.get('path'):
+ current_task = self.get_task_iter()
+ task_cd_action = self.na_helper.get_cd_action(current_task, self.parameters)
+ if task_cd_action is None and self.parameters['state'] == 'present':
+ task_modify = self.na_helper.get_modified_attributes(current_task, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.parameters.get('path'):
+ if task_cd_action == 'create':
+ # if policy doesn't exist, create the policy first.
+ if cd_action == 'create':
+ self.create_policy()
+ self.add_task_to_policy()
+ self.set_sd()
+ elif task_cd_action == 'delete':
+ # delete the task, not the policy.
+ self.remove_task_from_policy()
+ elif task_modify:
+ self.modify_task(task_modify)
+ self.set_sd()
+ else:
+ if cd_action == 'create':
+ self.create_policy()
+ self.set_sd()
+ elif cd_action == 'delete':
+ self.remove_policy()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates, deletes and modifies file directory policy
+ """
+ obj = NetAppOntapFilePolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py
new file mode 100644
index 00000000..3192183a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py
@@ -0,0 +1,366 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_firewall_policy
+short_description: NetApp ONTAP Manage a firewall policy
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Configure firewall on an ONTAP node and manage firewall policy for an ONTAP SVM
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+requirements:
+ - Python package ipaddress. Install using 'pip install ipaddress'
+options:
+ state:
+ description:
+ - Whether to set up a firewall policy or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ allow_list:
+ description:
+ - A list of IPs and masks to use.
+ - The host bits of the IP addresses used in this list must be set to 0.
+ type: list
+ elements: str
+ policy:
+ description:
+ - A policy name for the firewall policy
+ type: str
+ service:
+ description:
+ - The service to apply the policy to
+ - https and ssh are not supported starting with ONTAP 9.6
+ - portmap is supported for ONTAP 9.4, 9.5 and 9.6
+ choices: ['dns', 'http', 'https', 'ndmp', 'ndmps', 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet']
+ type: str
+ vserver:
+ description:
+ - The Vserver to apply the policy to.
+ type: str
+ enable:
+ description:
+ - enable firewall on a node
+ choices: ['enable', 'disable']
+ type: str
+ logging:
+ description:
+ - enable logging for firewall on a node
+ choices: ['enable', 'disable']
+ type: str
+ node:
+ description:
+ - The node to run the firewall configuration on
+ type: str
+'''
+
+EXAMPLES = """
+ - name: create firewall Policy
+ na_ontap_firewall_policy:
+ state: present
+ allow_list: [1.2.3.0/24,1.3.0.0/16]
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+ - name: Modify firewall Policy
+ na_ontap_firewall_policy:
+ state: present
+ allow_list: [1.5.3.0/24]
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+    - name: Destroy firewall Policy
+ na_ontap_firewall_policy:
+ state: absent
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+ - name: Enable firewall and logging on a node
+ na_ontap_firewall_policy:
+ node: test-vsim1
+ enable: enable
+ logging: enable
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+try:
+ import ipaddress
+ HAS_IPADDRESS_LIB = True
+except ImportError:
+ HAS_IPADDRESS_LIB = False
+
+import sys
+# Python 3 merged unicode in to str, this is to make sure nothing breaks
+# https://stackoverflow.com/questions/19877306/nameerror-global-name-unicode-is-not-defined-in-python-3
+if sys.version_info[0] >= 3:
+ unicode = str
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPFirewallPolicy(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ allow_list=dict(required=False, type='list', elements='str'),
+ policy=dict(required=False, type='str'),
+ service=dict(required=False, type='str', choices=['dns', 'http', 'https', 'ndmp', 'ndmps',
+ 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet']),
+ vserver=dict(required=False, type="str"),
+ enable=dict(required=False, type="str", choices=['enable', 'disable']),
+ logging=dict(required=False, type="str", choices=['enable', 'disable']),
+ node=dict(required=False, type="str")
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=(['policy', 'service', 'vserver'],
+ ['enable', 'node']
+ ),
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ if HAS_IPADDRESS_LIB is False:
+ self.module.fail_json(msg="the python ipaddress lib is required for this module")
+ return
+
+ def validate_ip_addresses(self):
+ '''
+        Validate that each given IP address is a network address (i.e. its host bits are set to 0)
+ ONTAP doesn't validate if the host bits are set,
+ and hence doesn't add a new address unless the IP is from a different network.
+ So this validation allows the module to be idempotent.
+ :return: None
+ '''
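+        # For illustration (standard ipaddress library behavior with the default strict mode):
+        #   ipaddress.ip_network(u'1.2.3.0/24')  -> IPv4Network('1.2.3.0/24')
+        #   ipaddress.ip_network(u'1.2.3.4/24')  -> ValueError: 1.2.3.4/24 has host bits set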
+ for ip in self.parameters['allow_list']:
+ # create an IPv4 object for current IP address
+ if sys.version_info[0] >= 3:
+ ip_addr = str(ip)
+ else:
+ ip_addr = unicode(ip) # pylint: disable=undefined-variable
+ # get network address from netmask, throw exception if address is not a network address
+ try:
+ ipaddress.ip_network(ip_addr)
+ except ValueError as exc:
+                self.module.fail_json(msg='Error: Invalid IP address value for allow_list parameter. '
+                                          'Please specify a network address without host bits set: %s'
+ % (to_native(exc)))
+
+ def get_firewall_policy(self):
+ """
+ Get a firewall policy
+        :return: dict with the policy's service and allow_list, or None if the policy does not exist
+ """
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-get-iter")
+ attributes = {
+ 'query': {
+ 'net-firewall-policy-info': self.firewall_policy_attributes()
+ }
+ }
+ net_firewall_policy_obj.translate_struct(attributes)
+
+ try:
+ result = self.server.invoke_successfully(net_firewall_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error getting firewall policy %s: %s" % (self.parameters['policy'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ policy_info = attributes_list.get_child_by_name('net-firewall-policy-info')
+ ips = self.na_helper.get_value_for_list(from_zapi=True,
+ zapi_parent=policy_info.get_child_by_name('allow-list'))
+ return {
+ 'service': policy_info['service'],
+ 'allow_list': ips}
+ return None
+
+ def create_firewall_policy(self):
+ """
+ Create a firewall policy for given vserver
+ :return: None
+ """
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-create")
+ net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+ if self.parameters.get('allow_list'):
+ self.validate_ip_addresses()
+ net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent='allow-list',
+ zapi_child='ip-and-mask',
+ data=self.parameters['allow_list'])
+ )
+ try:
+ self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+ def destroy_firewall_policy(self):
+ """
+ Destroy a Firewall Policy from a vserver
+ :return: None
+ """
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-destroy")
+ net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+ try:
+ self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error destroying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+ def modify_firewall_policy(self, modify):
+ """
+ Modify a firewall Policy on a vserver
+ :return: none
+ """
+ self.validate_ip_addresses()
+ net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-modify")
+ net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+ net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent='allow-list',
+ zapi_child='ip-and-mask',
+ data=modify['allow_list']))
+ try:
+ self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modifying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+ def firewall_policy_attributes(self):
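+        """Return the policy, service and vserver attributes used to identify the firewall policy in ZAPI calls."""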
+ return {
+ 'policy': self.parameters['policy'],
+ 'service': self.parameters['service'],
+ 'vserver': self.parameters['vserver'],
+ }
+
+ def get_firewall_config_for_node(self):
+ """
+ Get firewall configuration on the node
+ :return: dict() with firewall config details
+ """
+ if self.parameters.get('logging'):
+ if self.parameters.get('node') is None:
+ self.module.fail_json(msg='Error: Missing parameter \'node\' to modify firewall logging')
+ net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-get")
+ net_firewall_config_obj.add_new_child('node-name', self.parameters['node'])
+ try:
+ result = self.server.invoke_successfully(net_firewall_config_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error getting Firewall Configuration: %s" % (to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('attributes'):
+ firewall_info = result['attributes'].get_child_by_name('net-firewall-config-info')
+ return {'enable': self.change_status_to_bool(firewall_info.get_child_content('is-enabled'), to_zapi=False),
+ 'logging': self.change_status_to_bool(firewall_info.get_child_content('is-logging'), to_zapi=False)}
+ return None
+
+ def modify_firewall_config(self, modify):
+ """
+ Modify the configuration of a firewall on node
+ :return: None
+ """
+ net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-modify")
+ net_firewall_config_obj.add_new_child('node-name', self.parameters['node'])
+ if modify.get('enable'):
+ net_firewall_config_obj.add_new_child('is-enabled', self.change_status_to_bool(self.parameters['enable']))
+ if modify.get('logging'):
+ net_firewall_config_obj.add_new_child('is-logging', self.change_status_to_bool(self.parameters['logging']))
+ try:
+ self.server.invoke_successfully(net_firewall_config_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error modifying Firewall Config: %s" % (to_native(error)),
+ exception=traceback.format_exc())
+
+    def change_status_to_bool(self, status, to_zapi=True):
+        if to_zapi:
+            return 'true' if status == 'enable' else 'false'
+        else:
+            return 'enable' if status == 'true' else 'disable'
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_firewall_policy", cserver)
+
+ def apply(self):
+ self.autosupport_log()
+ cd_action, modify, modify_config = None, None, None
+ if self.parameters.get('policy'):
+ current = self.get_firewall_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.parameters.get('node'):
+ current_config = self.get_firewall_config_for_node()
+            # firewall config for a node is always present; we cannot create or delete a firewall on a node
+ modify_config = self.na_helper.get_modified_attributes(current_config, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_firewall_policy()
+ elif cd_action == 'delete':
+ self.destroy_firewall_policy()
+ else:
+ if modify:
+ self.modify_firewall_policy(modify)
+ if modify_config:
+ self.modify_firewall_config(modify_config)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ :return: nothing
+ """
+ cg_obj = NetAppONTAPFirewallPolicy()
+ cg_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py
new file mode 100644
index 00000000..0288506c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py
@@ -0,0 +1,737 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+  - Update ONTAP service-processor firmware.
+  - The recommended procedure is to
+ 1. download the firmware package from the NetApp Support site
+ 2. copy the package to a web server
+ 3. download the package from the web server using this module
+ - Once a disk qualification, disk, shelf, or ACP firmware package is downloaded, ONTAP will automatically update the related resources in background.
+ - It may take some time to complete.
+ - For service processor, the update requires a node reboot to take effect.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_firmware_upgrade
+options:
+ state:
+ description:
+ - Whether the specified ONTAP firmware should be upgraded or not.
+ default: present
+ type: str
+ node:
+ description:
+ - Node on which the device is located.
+ - Not required if package_url is present and force_disruptive_update is False.
+ - If this option is not given, the firmware will be downloaded on all nodes in the cluster,
+ - and the resources will be updated in background on all nodes, except for service processor.
+ - For service processor, the upgrade will happen automatically when each node is rebooted.
+ type: str
+ clear_logs:
+ description:
+ - Clear logs on the device after update. Default value is true.
+ - Not used if force_disruptive_update is False.
+ type: bool
+ default: true
+ package:
+ description:
+      - Name of the package file containing the firmware to be installed. Not required when install_baseline_image is true.
+ - Not used if force_disruptive_update is False.
+ type: str
+ package_url:
+ description:
+ - URL of the package file containing the firmware to be downloaded.
+ - Once the package file is downloaded to a node, the firmware update will happen automatically in background.
+ - For SP, the upgrade will happen automatically when a node is rebooted.
+ - For SP, the upgrade will happen automatically if autoupdate is enabled (which is the recommended setting).
+ version_added: "20.4.1"
+ type: str
+ force_disruptive_update:
+ description:
+      - If set to C(False), and a package URL is given, the upgrade is non-disruptive. If no URL is given, no operation is performed.
+ - Do not set this to C(True), unless directed by NetApp Tech Support.
+ - It will force an update even if the resource is not ready for it, and can be disruptive.
+ type: bool
+ version_added: "20.4.1"
+ default: False
+ shelf_module_fw:
+ description:
+ - Shelf module firmware to be updated to.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: str
+ disk_fw:
+ description:
+ - disk firmware to be updated to.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: str
+ update_type:
+ description:
+ - Type of firmware update to be performed. Options include serial_full, serial_differential, network_full.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: str
+ install_baseline_image:
+ description:
+ - Install the version packaged with ONTAP if this parameter is set to true. Otherwise, package must be used to specify the package to install.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ type: bool
+ default: false
+ firmware_type:
+ description:
+ - Type of firmware to be upgraded. Options include shelf, ACP, service-processor, and disk.
+      - For a shelf firmware upgrade, the operation is asynchronous, and therefore returns no errors that might occur during the download process.
+      - Shelf firmware upgrade is idempotent if shelf_module_fw is provided.
+      - Disk firmware upgrade is idempotent if disk_fw is provided.
+      - With check mode, SP, ACP, disk, and shelf firmware upgrades are not idempotent.
+      - This operation will only update firmware on shelves/disks that do not have the latest firmware revision.
+ - Not used if force_disruptive_update is False (ONTAP will automatically detect the firmware type)
+ choices: ['service-processor', 'shelf', 'acp', 'disk']
+ type: str
+ fail_on_502_error:
+ description:
+ - The firmware download may take time if the web server is slow and if there are many nodes in the cluster.
+      - ONTAP will break the ZAPI connection after 5 minutes with a 502 Bad Gateway error, even though the download is still happening.
+      - By default, this module ignores this error and assumes the download is progressing as ONTAP does not provide a way to check the status.
+ - When setting this option to true, the module will report 502 as an error.
+ type: bool
+ default: false
+ version_added: "20.6.0"
+ rename_package:
+ description:
+ - Rename the package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: str
+ version_added: "20.6.1"
+ replace_package:
+ description:
+ - Replace the local package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: bool
+ version_added: "20.6.1"
+ reboot_sp:
+ description:
+ - Reboot service processor before downloading package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: bool
+ default: true
+ version_added: "20.6.1"
+short_description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
+version_added: 2.9.0
+'''
+
+EXAMPLES = """
+
+ - name: firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: firmware upgrade, confirm successful download
+ na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ fail_on_502_error: true
+ - name: SP firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package: "{{ file name }}"
+ package_url: "{{ web_link }}"
+ clear_logs: True
+ install_baseline_image: False
+ update_type: serial_full
+ force_disruptive_update: False
+ firmware_type: service-processor
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: SP firmware download replace package
+ tags:
+ - sp_download
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+ replace_package: true
+ reboot_sp: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ - name: SP firmware download rename package
+ tags:
+ - sp_download
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+ rename_package: SP_FW.zip
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ - name: ACP firmware download and upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ firmware_type: acp
+ force_disruptive_update: False
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: shelf firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ firmware_type: shelf
+ shelf_module_fw: 1221
+ force_disruptive_update: False
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: disk firmware upgrade
+ na_ontap_firmware_upgrade:
+ state: present
+ firmware_type: disk
+ disk_fw: NA02
+ force_disruptive_update: False
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+msg:
+ description: Returns additional information in case of success.
+ returned: always
+ type: str
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import time
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+MSGS = dict(
+ no_action='No action taken.',
+ dl_completed='Firmware download completed.',
+ dl_completed_slowly='Firmware download completed, slowly.',
+ dl_in_progress='Firmware download still in progress.'
+)
+
+
+class NetAppONTAPFirmwareUpgrade(object):
+ """
+ Class with ONTAP firmware upgrade methods
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present'),
+ node=dict(required=False, type='str'),
+ firmware_type=dict(required=False, type='str', choices=['service-processor', 'shelf', 'acp', 'disk']),
+ clear_logs=dict(required=False, type='bool', default=True),
+ package=dict(required=False, type='str'),
+ install_baseline_image=dict(required=False, type='bool', default=False),
+ update_type=dict(required=False, type='str'),
+ shelf_module_fw=dict(required=False, type='str'),
+ disk_fw=dict(required=False, type='str'),
+ package_url=dict(required=False, type='str'),
+ force_disruptive_update=dict(required=False, type='bool', default=False),
+ fail_on_502_error=dict(required=False, type='bool', default=False),
+ rename_package=dict(required=False, type='str'),
+ replace_package=dict(required=False, type='bool'),
+ reboot_sp=dict(required=False, type='bool', default=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('firmware_type', 'acp', ['node']),
+ ('firmware_type', 'disk', ['node']),
+ ('firmware_type', 'service-processor', ['node']),
+ ('force_disruptive_update', True, ['firmware_type']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('firmware_type') == 'service-processor':
+ if self.parameters.get('install_baseline_image') and self.parameters.get('package') is not None:
+ self.module.fail_json(msg='Do not specify both package and install_baseline_image: true')
+ if not self.parameters.get('package') and self.parameters.get('install_baseline_image') == 'False':
+ self.module.fail_json(msg='Specify at least one of package or install_baseline_image')
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+ def firmware_image_get_iter(self):
+ """
+ Compose NaElement object to query current firmware version
+ :return: NaElement object for firmware_image_get_iter with query
+ """
+ firmware_image_get = netapp_utils.zapi.NaElement('service-processor-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ firmware_image_info = netapp_utils.zapi.NaElement('service-processor-info')
+ firmware_image_info.add_new_child('node', self.parameters['node'])
+ query.add_child_elem(firmware_image_info)
+ firmware_image_get.add_child_elem(query)
+ return firmware_image_get
+
+ def firmware_image_get(self, node_name):
+ """
+ Get current firmware image info
+ :return: True if query successful, else return None
+ """
+ firmware_image_get_iter = self.firmware_image_get_iter()
+ try:
+ result = self.server.invoke_successfully(firmware_image_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching firmware image details: %s: %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+ # return firmware image details
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ sp_info = result.get_child_by_name('attributes-list').get_child_by_name('service-processor-info')
+ firmware_version = sp_info.get_child_content('firmware-version')
+ return firmware_version
+ return None
+
+ def acp_firmware_required_get(self):
+ """
+        Check whether an ACP firmware upgrade is required
+        :return: True if a firmware upgrade is required, else False
+ """
+ acp_firmware_get_iter = netapp_utils.zapi.NaElement('storage-shelf-acp-module-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ acp_info = netapp_utils.zapi.NaElement('storage-shelf-acp-module')
+ query.add_child_elem(acp_info)
+ acp_firmware_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(acp_firmware_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching acp firmware details: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-acp-module'):
+ acp_module_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'storage-shelf-acp-module')
+ state = acp_module_info.get_child_content('state')
+ if state == 'firmware_update_required':
+ # acp firmware version upgrade required
+ return True
+ return False
+
+ def sp_firmware_image_update_progress_get(self, node_name):
+ """
+ Get current firmware image update progress info
+ :return: Dictionary of firmware image update progress if query successful, else return None
+ """
+ firmware_update_progress_get = netapp_utils.zapi.NaElement('service-processor-image-update-progress-get')
+ firmware_update_progress_get.add_new_child('node', self.parameters['node'])
+
+ firmware_update_progress_info = dict()
+ try:
+ result = self.server.invoke_successfully(firmware_update_progress_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching firmware image upgrade progress details: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ # return firmware image update progress details
+ if result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info'):
+ update_progress_info = result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info')
+ firmware_update_progress_info['is-in-progress'] = update_progress_info.get_child_content('is-in-progress')
+ firmware_update_progress_info['node'] = update_progress_info.get_child_content('node')
+ return firmware_update_progress_info
+
+ def shelf_firmware_info_get(self):
+ """
+        Get the current firmware of the shelf modules
+        :return: dict with module id and firmware info
+ """
+ shelf_id_fw_info = dict()
+ shelf_firmware_info_get = netapp_utils.zapi.NaElement('storage-shelf-info-get-iter')
+ desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+ storage_shelf_info = netapp_utils.zapi.NaElement('storage-shelf-info')
+ shelf_module = netapp_utils.zapi.NaElement('shelf-modules')
+ shelf_module_info = netapp_utils.zapi.NaElement('storage-shelf-module-info')
+ shelf_module.add_child_elem(shelf_module_info)
+ storage_shelf_info.add_child_elem(shelf_module)
+ desired_attributes.add_child_elem(storage_shelf_info)
+ shelf_firmware_info_get.add_child_elem(desired_attributes)
+
+ try:
+ result = self.server.invoke_successfully(shelf_firmware_info_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching shelf module firmware details: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ shelf_info = result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-info')
+ if (shelf_info.get_child_by_name('shelf-modules') and
+ shelf_info.get_child_by_name('shelf-modules').get_child_by_name('storage-shelf-module-info')):
+ shelves = shelf_info['shelf-modules'].get_children()
+ for shelf in shelves:
+ shelf_id_fw_info[shelf.get_child_content('module-id')] = shelf.get_child_content('module-fw-revision')
+ return shelf_id_fw_info
+
+ def disk_firmware_info_get(self):
+ """
+        Get the current firmware of the disk modules
+        :return: dict with disk uid and firmware info
+ """
+ disk_id_fw_info = dict()
+ disk_firmware_info_get = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+ storage_disk_info = netapp_utils.zapi.NaElement('storage-disk-info')
+ disk_inv = netapp_utils.zapi.NaElement('disk-inventory-info')
+ storage_disk_info.add_child_elem(disk_inv)
+ desired_attributes.add_child_elem(storage_disk_info)
+ disk_firmware_info_get.add_child_elem(desired_attributes)
+ try:
+ result = self.server.invoke_successfully(disk_firmware_info_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching disk module firmware details: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ disk_info = result.get_child_by_name('attributes-list')
+ disks = disk_info.get_children()
+ for disk in disks:
+ disk_id_fw_info[disk.get_child_content('disk-uid')] = disk.get_child_by_name('disk-inventory-info').get_child_content('firmware-revision')
+ return disk_id_fw_info
+
+ def disk_firmware_required_get(self):
+ """
+        Check whether a disk firmware upgrade is required or not
+ :return: True if the firmware upgrade is required
+ """
+ disk_firmware_info = self.disk_firmware_info_get()
+ for disk in disk_firmware_info:
+ if (disk_firmware_info[disk]) != self.parameters['disk_fw']:
+ return True
+ return False
+
+ def shelf_firmware_required_get(self):
+ """
+        Check whether a shelf firmware upgrade is required or not
+ :return: True if the firmware upgrade is required
+ """
+ shelf_firmware_info = self.shelf_firmware_info_get()
+ for module in shelf_firmware_info:
+ if (shelf_firmware_info[module]) != self.parameters['shelf_module_fw']:
+ return True
+ return False
+
+ def sp_firmware_image_update(self):
+ """
+ Update current firmware image
+ """
+ firmware_update_info = netapp_utils.zapi.NaElement('service-processor-image-update')
+ if self.parameters.get('package') is not None:
+ firmware_update_info.add_new_child('package', self.parameters['package'])
+ if self.parameters.get('clear_logs') is not None:
+ firmware_update_info.add_new_child('clear-logs', str(self.parameters['clear_logs']))
+ if self.parameters.get('install_baseline_image') is not None:
+ firmware_update_info.add_new_child('install-baseline-image', str(self.parameters['install_baseline_image']))
+ firmware_update_info.add_new_child('node', self.parameters['node'])
+ firmware_update_info.add_new_child('update-type', self.parameters['update_type'])
+
+ try:
+ self.server.invoke_successfully(firmware_update_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ # Current firmware version matches the version to be installed
+ if to_native(error.code) == '13001' and (error.message.startswith('Service Processor update skipped')):
+ return False
+ self.module.fail_json(msg='Error updating firmware image for %s: %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+ return True
+
+ def shelf_firmware_upgrade(self):
+ """
+ Upgrade shelf firmware image
+ """
+ shelf_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-firmware-update')
+ try:
+ self.server.invoke_successfully(shelf_firmware_update_info, enable_tunneling=True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating shelf firmware image : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+
+ def acp_firmware_upgrade(self):
+
+ """
+        Upgrade ACP firmware image
+ """
+ acp_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-acp-firmware-update')
+ acp_firmware_update_info.add_new_child('node-name', self.parameters['node'])
+ try:
+ self.server.invoke_successfully(acp_firmware_update_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating acp firmware image : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+
+ def disk_firmware_upgrade(self):
+
+ """
+ Upgrade disk firmware
+ """
+ disk_firmware_update_info = netapp_utils.zapi.NaElement('disk-update-disk-fw')
+ disk_firmware_update_info.add_new_child('node-name', self.parameters['node'])
+ try:
+ self.server.invoke_successfully(disk_firmware_update_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating disk firmware image : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ return True
+
+ def download_firmware(self):
+ ''' calls the system-cli ZAPI as there is no ZAPI for this feature '''
+ msg = MSGS['dl_completed']
+ command = ['storage', 'firmware', 'download', '-node', self.parameters['node'] if self.parameters.get('node') else '*',
+ '-package-url', self.parameters['package_url']]
+ command_obj = netapp_utils.zapi.NaElement("system-cli")
+
+ args_obj = netapp_utils.zapi.NaElement("args")
+ for arg in command:
+ args_obj.add_new_child('arg', arg)
+ command_obj.add_child_elem(args_obj)
+ command_obj.add_new_child('priv', 'advanced')
+
+ output = None
+ try:
+ output = self.server.invoke_successfully(command_obj, True)
+
+ except netapp_utils.zapi.NaApiError as error:
+            # with netapp_lib, error.code may be a number or a string
+ try:
+ err_num = int(error.code)
+ except ValueError:
+ err_num = -1
+ if err_num == 60: # API did not finish on time
+ # even if the ZAPI reports a timeout error, it does it after the command completed
+ msg = MSGS['dl_completed_slowly']
+ elif err_num == 502 and not self.parameters['fail_on_502_error']: # Bad Gateway
+ # ONTAP proxy breaks the connection after 5 minutes, we can assume the download is progressing slowly
+ msg = MSGS['dl_in_progress']
+ else:
+ self.module.fail_json(msg='Error running command %s: %s' % (command, to_native(error)),
+ exception=traceback.format_exc())
+ except netapp_utils.zapi.etree.XMLSyntaxError as error:
+ self.module.fail_json(msg='Error decoding output from command %s: %s' % (command, to_native(error)),
+ exception=traceback.format_exc())
+
+ if output is not None:
+ # command completed, check for success
+ status = output.get_attr('status')
+ cli_output = output.get_child_content('cli-output')
+ if status is None or status != 'passed' or cli_output is None or cli_output == "":
+ if status is None:
+ extra_info = "'status' attribute missing"
+ elif status != 'passed':
+ extra_info = "check 'status' value"
+ else:
+ extra_info = 'check console permissions'
+ self.module.fail_json(msg='unable to download package from %s: %s. Received: %s' %
+ (self.parameters['package_url'], extra_info, output.to_string()))
+
+ if cli_output is not None:
+ if cli_output.startswith('Error:') or \
+ 'Failed to download package from' in cli_output:
+ self.module.fail_json(msg='failed to download package from %s: %s' % (self.parameters['package_url'], cli_output))
+ msg += " Extra info: %s" % cli_output
+
+ return msg
+
+ def download_sp_image(self):
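+        """Fetch the firmware package onto the node with system-image-fetch-package, optionally renaming or replacing the local package."""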
+ fetch_package = netapp_utils.zapi.NaElement('system-image-fetch-package')
+ fetch_package.add_new_child('node', self.parameters['node'])
+ fetch_package.add_new_child('package', self.parameters['package_url'])
+ if self.parameters.get('rename_package'):
+ fetch_package.add_new_child('rename-package', self.parameters['rename_package'])
+ if self.parameters.get('replace_package'):
+ fetch_package.add_new_child('replace-package', str(self.parameters['replace_package']))
+ try:
+ self.server.invoke_successfully(fetch_package, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching system image package from %s: %s'
+ % (self.parameters['package_url'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def download_sp_image_progress(self):
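+        """Return phase, exit message, exit status, last message, and run status of the current image update/download."""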
+ progress = netapp_utils.zapi.NaElement('system-image-update-progress-get')
+ progress.add_new_child('node', self.parameters['node'])
+ progress_info = dict()
+ try:
+ result = self.server.invoke_successfully(progress, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching system image package download progress: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+        # collect whichever progress fields are present; missing fields are reported as None
+        for key in ('phase', 'exit-message', 'exit-status', 'last-message', 'run-status'):
+            progress_info[key.replace('-', '_')] = \
+                result.get_child_content(key) if result.get_child_by_name(key) else None
+ return progress_info
+
+ def reboot_sp(self):
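+        """Reboot the service processor on the target node."""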
+ reboot = netapp_utils.zapi.NaElement('service-processor-reboot')
+ reboot.add_new_child('node', self.parameters['node'])
+ try:
+ self.server.invoke_successfully(reboot, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error rebooting service processor: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def download_sp_firmware(self):
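+        """
+        Optionally reboot the SP, start the package download, and poll the download progress
+        until it exits; fail if the download does not complete successfully.
+        """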
+ if self.parameters.get('reboot_sp'):
+ self.reboot_sp()
+ self.download_sp_image()
+ progress = self.download_sp_image_progress()
+        # progress only shows the current or most recent update/install operation.
+ if progress['phase'] == 'Download':
+ while progress['run_status'] is not None and progress['run_status'] != 'Exited':
+ time.sleep(10)
+ progress = self.download_sp_image_progress()
+ if progress['exit_status'] != 'Success':
+ self.module.fail_json(msg=progress['exit_message'], exception=traceback.format_exc())
+ return MSGS['dl_completed']
+ return MSGS['no_action']
+
+ def autosupport_log(self):
+ """
+ Autosupport log for software_update
+ :return:
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_firmware_upgrade", cserver)
+
+ def apply(self):
+ """
+ Apply action to upgrade firmware
+ """
+ changed = False
+ msg = MSGS['no_action']
+ self.autosupport_log()
+ firmware_update_progress = dict()
+ if self.parameters.get('package_url'):
+ if not self.module.check_mode:
+ if self.parameters.get('firmware_type') == 'service-processor':
+ msg = self.download_sp_firmware()
+ else:
+ msg = self.download_firmware()
+ changed = True
+ if not self.parameters['force_disruptive_update']:
+ # disk_qual, disk, shelf, and ACP are automatically updated in background
+ # The SP firmware is automatically updated on reboot
+ self.module.exit_json(changed=changed, msg=msg)
+ if msg == MSGS['dl_in_progress']:
+ # can't force an update if the software is still downloading
+ self.module.fail_json(msg="Cannot force update: %s" % msg)
+ if self.parameters.get('firmware_type') == 'service-processor':
+ # service-processor firmware upgrade
+ current = self.firmware_image_get(self.parameters['node'])
+
+ if self.parameters.get('state') == 'present' and current:
+ if not self.module.check_mode:
+ if self.sp_firmware_image_update():
+ changed = True
+ firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
+ while firmware_update_progress.get('is-in-progress') == 'true':
+ time.sleep(25)
+ firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
+ else:
+ # we don't know until we try the upgrade
+ changed = True
+
+ elif self.parameters.get('firmware_type') == 'shelf':
+ # shelf firmware upgrade
+ if self.parameters.get('shelf_module_fw'):
+ if self.shelf_firmware_required_get():
+ if not self.module.check_mode:
+ changed = self.shelf_firmware_upgrade()
+ else:
+ changed = True
+ else:
+ if not self.module.check_mode:
+ changed = self.shelf_firmware_upgrade()
+ else:
+ # we don't know until we try the upgrade -- assuming the worst
+ changed = True
+ elif self.parameters.get('firmware_type') == 'acp':
+ # acp firmware upgrade
+ if self.acp_firmware_required_get():
+ if not self.module.check_mode:
+ self.acp_firmware_upgrade()
+ changed = True
+ elif self.parameters.get('firmware_type') == 'disk':
+ # Disk firmware upgrade
+ if self.parameters.get('disk_fw'):
+ if self.disk_firmware_required_get():
+ if not self.module.check_mode:
+ changed = self.disk_firmware_upgrade()
+ else:
+ changed = True
+ else:
+ if not self.module.check_mode:
+ changed = self.disk_firmware_upgrade()
+ else:
+ # we don't know until we try the upgrade -- assuming the worst
+ changed = True
+
+ self.module.exit_json(changed=changed, msg='forced update for %s' % self.parameters.get('firmware_type'))
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPFirmwareUpgrade()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py
new file mode 100644
index 00000000..9dea364b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py
@@ -0,0 +1,470 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP FlexCache - create/delete relationship
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete FlexCache volume relationships
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_flexcache
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified relationship should exist or not.
+ default: present
+ type: str
+ origin_volume:
+ description:
+ - Name of the origin volume for the FlexCache.
+ - Required for creation.
+ type: str
+ origin_vserver:
+ description:
+ - Name of the origin vserver for the FlexCache.
+ - Required for creation.
+ type: str
+ origin_cluster:
+ description:
+ - Name of the origin cluster for the FlexCache.
+ - Defaults to cluster associated with target vserver if absent.
+ - Not used for creation.
+ type: str
+ volume:
+ description:
+ - Name of the target volume for the FlexCache.
+ required: true
+ type: str
+ junction_path:
+ description:
+ - Junction path of the cache volume.
+ type: str
+ auto_provision_as:
+ description:
+      - Use this parameter to automatically select existing aggregates for volume provisioning. For example, flexgroup.
+ - Note that the fastest aggregate type with at least one aggregate on each node of the cluster will be selected.
+ type: str
+ size:
+ description:
+ - Size of cache volume.
+ type: int
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: gb
+ vserver:
+ description:
+ - Name of the target vserver for the FlexCache.
+ - Note that hostname, username, password are intended for the target vserver.
+ required: true
+ type: str
+ aggr_list:
+ description:
+ - List of aggregates to host target FlexCache volume.
+ type: list
+ elements: str
+ aggr_list_multiplier:
+ description:
+ - Aggregate list repeat count.
+ type: int
+ force_unmount:
+ description:
+ - Unmount FlexCache volume. Delete the junction path at which the volume is mounted before deleting the FlexCache relationship.
+ type: bool
+ default: false
+ force_offline:
+ description:
+ - Offline FlexCache volume before deleting the FlexCache relationship.
+ - The volume will be destroyed and data can be lost.
+ type: bool
+ default: false
+ time_out:
+ description:
+      - Time to wait for FlexCache creation or deletion, in seconds.
+      - If 0, the request is asynchronous.
+      - The default is set to 3 minutes.
+ type: int
+ default: 180
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexCache
+ na_ontap_FlexCache:
+ state: present
+ origin_volume: test_src
+ volume: test_dest
+ origin_vserver: ansible_src
+ vserver: ansible_dest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete FlexCache
+ na_ontap_FlexCache:
+ state: absent
+ volume: test_dest
+ vserver: ansible_dest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPFlexCache(object):
+ """
+ Class with FlexCache methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'],
+ default='present'),
+ origin_volume=dict(required=False, type='str'),
+ origin_vserver=dict(required=False, type='str'),
+ origin_cluster=dict(required=False, type='str'),
+ auto_provision_as=dict(required=False, type='str'),
+ volume=dict(required=True, type='str'),
+ junction_path=dict(required=False, type='str'),
+ size=dict(required=False, type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ vserver=dict(required=True, type='str'),
+ aggr_list=dict(required=False, type='list', elements='str'),
+ aggr_list_multiplier=dict(required=False, type='int'),
+ force_offline=dict(required=False, type='bool', default=False),
+ force_unmount=dict(required=False, type='bool', default=False),
+ time_out=dict(required=False, type='int', default=180),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ mutually_exclusive=[
+ ('aggr_list', 'auto_provision_as'),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('size'):
+ self.parameters['size'] = self.parameters['size'] * \
+ netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
+ # setup later if required
+ self.origin_server = None
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def add_parameter_to_dict(self, adict, name, key=None, tostr=False):
+ ''' add defined parameter (not None) to adict using key '''
+ if key is None:
+ key = name
+ if self.parameters.get(name) is not None:
+ if tostr:
+ adict[key] = str(self.parameters.get(name))
+ else:
+ adict[key] = self.parameters.get(name)
+
+ def get_job(self, jobid, server):
+ """
+ Get job details by id
+ """
+ job_get = netapp_utils.zapi.NaElement('job-get')
+ job_get.add_new_child('job-id', jobid)
+ try:
+ result = server.invoke_successfully(job_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "15661":
+ # Not found
+ return None
+ self.module.fail_json(msg='Error fetching job info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ results = dict()
+ job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
+ results = {
+ 'job-progress': job_info['job-progress'],
+ 'job-state': job_info['job-state']
+ }
+ if job_info.get_child_by_name('job-completion') is not None:
+ results['job-completion'] = job_info['job-completion']
+ else:
+ results['job-completion'] = None
+ return results
+
+ def check_job_status(self, jobid):
+ """
+ Loop until job is complete
+ """
+ server = self.server
+ sleep_time = 5
+ time_out = self.parameters['time_out']
+ while time_out > 0:
+ results = self.get_job(jobid, server)
+ # If running as cluster admin, the job is owned by cluster vserver
+ # rather than the target vserver.
+ if results is None and server == self.server:
+ results = netapp_utils.get_cserver(self.server)
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ continue
+ if results is None:
+                error = 'cannot locate job with id: %s' % jobid
+ break
+ if results['job-state'] in ('queued', 'running'):
+ time.sleep(sleep_time)
+ time_out -= sleep_time
+ continue
+ if results['job-state'] in ('success', 'failure'):
+ break
+ else:
+ self.module.fail_json(msg='Unexpected job status in: %s' % repr(results))
+
+ if results is not None:
+ if results['job-state'] == 'success':
+ error = None
+ elif results['job-state'] in ('queued', 'running'):
+ error = 'job completion exceeded expected timer of: %s seconds' % \
+ self.parameters['time_out']
+ else:
+ if results['job-completion'] is not None:
+ error = results['job-completion']
+ else:
+ error = results['job-progress']
+ return error
+
+ def flexcache_get_iter(self):
+ """
+ Compose NaElement object to query current FlexCache relation
+ """
+ options = {'volume': self.parameters['volume']}
+ self.add_parameter_to_dict(options, 'origin_volume', 'origin-volume')
+ self.add_parameter_to_dict(options, 'origin_vserver', 'origin-vserver')
+ self.add_parameter_to_dict(options, 'origin_cluster', 'origin-cluster')
+ flexcache_info = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'flexcache-info', **options)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(flexcache_info)
+ flexcache_get_iter = netapp_utils.zapi.NaElement('flexcache-get-iter')
+ flexcache_get_iter.add_child_elem(query)
+ return flexcache_get_iter
+
+ def flexcache_get(self):
+ """
+ Get current FlexCache relations
+ :return: Dictionary of current FlexCache details if query successful, else None
+ """
+ flexcache_get_iter = self.flexcache_get_iter()
+ flex_info = dict()
+ try:
+ result = self.server.invoke_successfully(flexcache_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching FlexCache info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ flexcache_info = result.get_child_by_name('attributes-list') \
+ .get_child_by_name('flexcache-info')
+ flex_info['origin_cluster'] = flexcache_info.get_child_content('origin-cluster')
+ flex_info['origin_volume'] = flexcache_info.get_child_content('origin-volume')
+ flex_info['origin_vserver'] = flexcache_info.get_child_content('origin-vserver')
+ flex_info['size'] = flexcache_info.get_child_content('size')
+ flex_info['volume'] = flexcache_info.get_child_content('volume')
+ flex_info['vserver'] = flexcache_info.get_child_content('vserver')
+ flex_info['auto_provision_as'] = flexcache_info.get_child_content('auto-provision-as')
+
+ return flex_info
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 1:
+ msg = 'Multiple records found for %s:' % self.parameters['volume']
+ self.module.fail_json(msg='Error fetching FlexCache info: %s' % msg)
+ return None
+
+ def flexcache_create_async(self):
+ """
+ Create a FlexCache relationship
+ """
+ options = {'origin-volume': self.parameters['origin_volume'],
+ 'origin-vserver': self.parameters['origin_vserver'],
+ 'volume': self.parameters['volume']}
+ self.add_parameter_to_dict(options, 'junction_path', 'junction-path')
+ self.add_parameter_to_dict(options, 'auto_provision_as', 'auto-provision-as')
+ self.add_parameter_to_dict(options, 'size', 'size', tostr=True)
+        if self.parameters.get('aggr_list') and self.parameters.get('aggr_list_multiplier'):
+            # pass the multiplier as a string, as done for other numeric ZAPI options
+            options['aggr-list-multiplier'] = str(self.parameters['aggr_list_multiplier'])
+ flexcache_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'flexcache-create-async', **options)
+ if self.parameters.get('aggr_list'):
+ aggregates = netapp_utils.zapi.NaElement('aggr-list')
+ for aggregate in self.parameters['aggr_list']:
+ aggregates.add_new_child('aggr-name', aggregate)
+ flexcache_create.add_child_elem(aggregates)
+ try:
+ result = self.server.invoke_successfully(flexcache_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating FlexCache %s' % to_native(error),
+ exception=traceback.format_exc())
+ results = dict()
+ for key in ('result-status', 'result-jobid'):
+ if result.get_child_by_name(key):
+ results[key] = result[key]
+ return results
+
+ def flexcache_create(self):
+ """
+ Create a FlexCache relationship
+ Check job status
+ """
+ results = self.flexcache_create_async()
+ status = results.get('result-status')
+ if status == 'in_progress' and 'result-jobid' in results:
+ if self.parameters['time_out'] == 0:
+ # asynchronous call, assuming success!
+ return
+ error = self.check_job_status(results['result-jobid'])
+ if error is None:
+ return
+ else:
+ self.module.fail_json(msg='Error when creating flexcache: %s' % error)
+ self.module.fail_json(msg='Unexpected error when creating flexcache: results is: %s' % repr(results))
+
+ def flexcache_delete_async(self):
+ """
+ Delete FlexCache relationship at destination cluster
+ """
+ options = {'volume': self.parameters['volume']}
+ flexcache_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'flexcache-destroy-async', **options)
+ try:
+ result = self.server.invoke_successfully(flexcache_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting FlexCache : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+ results = dict()
+ for key in ('result-status', 'result-jobid'):
+ if result.get_child_by_name(key):
+ results[key] = result[key]
+ return results
+
+ def volume_offline(self):
+ """
+ Offline FlexCache volume at destination cluster
+ """
+ options = {'name': self.parameters['volume']}
+ xml = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline', **options)
+ try:
+ self.server.invoke_successfully(xml, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error offlining FlexCache volume: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def volume_unmount(self):
+ """
+ Unmount FlexCache volume at destination cluster
+ """
+ options = {'volume-name': self.parameters['volume']}
+ xml = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-unmount', **options)
+ try:
+ self.server.invoke_successfully(xml, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error unmounting FlexCache volume: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def flexcache_delete(self):
+ """
+ Delete FlexCache relationship at destination cluster
+ Check job status
+ """
+ if self.parameters['force_unmount']:
+ self.volume_unmount()
+ if self.parameters['force_offline']:
+ self.volume_offline()
+ results = self.flexcache_delete_async()
+ status = results.get('result-status')
+ if status == 'in_progress' and 'result-jobid' in results:
+ if self.parameters['time_out'] == 0:
+ # asynchronous call, assuming success!
+ return
+ error = self.check_job_status(results['result-jobid'])
+ if error is None:
+ return
+ else:
+ self.module.fail_json(msg='Error when deleting flexcache: %s' % error)
+ self.module.fail_json(msg='Unexpected error when deleting flexcache: results is: %s' % repr(results))
+
+ def check_parameters(self):
+ """
+ Validate parameters and fail if one or more required params are missing
+ """
+ missings = list()
+ expected = ('origin_volume', 'origin_vserver')
+ if self.parameters['state'] == 'present':
+ for param in expected:
+ if not self.parameters.get(param):
+ missings.append(param)
+ if missings:
+ plural = 's' if len(missings) > 1 else ''
+ msg = 'Missing parameter%s: %s' % (plural, ', '.join(missings))
+ self.module.fail_json(msg=msg)
+
+ def apply(self):
+ """
+ Apply action to FlexCache
+ """
+ netapp_utils.ems_log_event("na_ontap_flexcache", self.server)
+ current = self.flexcache_get()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.check_parameters()
+ self.flexcache_create()
+ elif cd_action == 'delete':
+ self.flexcache_delete()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPFlexCache()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py
new file mode 100644
index 00000000..97c6be5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+''' This is the na_ontap_igroup module
+
+ (c) 2018-2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_igroup
+short_description: NetApp ONTAP iSCSI or FC igroup configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create/Delete/Rename Igroups and Modify initiators belonging to an igroup
+
+options:
+ state:
+ description:
+ - Whether the specified Igroup should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the igroup to manage.
+ required: true
+ type: str
+
+ initiator_group_type:
+ description:
+ - Type of the initiator group.
+ - Required when C(state=present).
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+
+ from_name:
+ description:
+ - Name of igroup to rename to name.
+ version_added: 2.7.0
+ type: str
+
+ os_type:
+ description:
+ - OS type of the initiators within the group.
+ type: str
+ aliases: ['ostype']
+
+ initiators:
+ description:
+ - List of initiators to be mapped to the igroup.
+ - WWPN, WWPN Alias, or iSCSI name of Initiator to add or remove.
+    - For a modify operation, this list replaces the existing initiators.
+    - This module does not add or remove specific initiators in an igroup.
+ aliases:
+ - initiator
+ type: list
+ elements: str
+
+ bind_portset:
+ description:
+ - Name of a current portset to bind to the newly created igroup.
+ type: str
+
+ force_remove_initiator:
+ description:
+ - Forcibly remove the initiator even if there are existing LUNs mapped to this initiator group.
+ type: bool
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create iSCSI Igroup
+ na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiators: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com,abc.com:redhat.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create FC Igroup
+ na_ontap_igroup:
+ state: present
+ name: ansibleIgroup4
+ initiator_group_type: fcp
+ os_type: linux
+ initiators: 20:00:00:50:56:9f:19:82
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: rename Igroup
+ na_ontap_igroup:
+ state: present
+ from_name: ansibleIgroup3
+ name: testexamplenewname
+ initiator_group_type: iscsi
+ os_type: linux
+ initiators: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+    - name: Modify Igroup Initiators (replaces existing initiators)
+ na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiator: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete Igroup
+ na_ontap_igroup:
+ state: absent
+ name: ansibleIgroup3
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIgroup(object):
+ """Create/Delete/Rename Igroups and Modify initiators list"""
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str', default=None),
+ os_type=dict(required=False, type='str', aliases=['ostype']),
+ initiator_group_type=dict(required=False, type='str',
+ choices=['fcp', 'iscsi', 'mixed']),
+ initiators=dict(required=False, type='list', elements='str', aliases=['initiator']),
+ vserver=dict(required=True, type='str'),
+ force_remove_initiator=dict(required=False, type='bool', default=False),
+ bind_portset=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.module.params.get('initiators') is not None:
+ self.parameters['initiators'] = [self.na_helper.sanitize_wwn(initiator)
+ for initiator in self.module.params['initiators']]
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_igroup(self, name):
+ """
+ Return details about the igroup
+ :param:
+ name : Name of the igroup
+
+ :return: Details about the igroup. None if not found.
+ :rtype: dict
+ """
+ igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
+ attributes = dict(query={'initiator-group-info': {'initiator-group-name': name,
+ 'vserver': self.parameters['vserver']}})
+ igroup_info.translate_struct(attributes)
+ result, current = None, None
+
+ try:
+ result = self.server.invoke_successfully(igroup_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ igroup = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
+ initiators = []
+ if igroup.get_child_by_name('initiators'):
+ current_initiators = igroup['initiators'].get_children()
+ for initiator in current_initiators:
+ initiators.append(initiator['initiator-name'])
+ current = {
+ 'initiators': initiators
+ }
+
+ return current
+
+ def add_initiators(self):
+ """
+ Add the list of initiators to igroup
+ :return: None
+ """
+ # don't add if initiators is empty string
+ if self.parameters.get('initiators') == [''] or self.parameters.get('initiators') is None:
+ return
+ for initiator in self.parameters['initiators']:
+ self.modify_initiator(initiator, 'igroup-add')
+
+ def remove_initiators(self, initiators):
+ """
+ Removes all existing initiators from igroup
+ :return: None
+ """
+ for initiator in initiators:
+ self.modify_initiator(initiator, 'igroup-remove')
+
+ def modify_initiator(self, initiator, zapi):
+ """
+ Add or remove an initiator to/from an igroup
+ """
+ options = {'initiator-group-name': self.parameters['name'],
+ 'initiator': initiator}
+
+ igroup_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+ try:
+ self.server.invoke_successfully(igroup_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_igroup(self):
+ """
+ Create the igroup.
+ """
+ options = {'initiator-group-name': self.parameters['name']}
+ if self.parameters.get('os_type') is not None:
+ options['os-type'] = self.parameters['os_type']
+ if self.parameters.get('initiator_group_type') is not None:
+ options['initiator-group-type'] = self.parameters['initiator_group_type']
+ if self.parameters.get('bind_portset') is not None:
+ options['bind-portset'] = self.parameters['bind_portset']
+
+ igroup_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'igroup-create', **options)
+
+ try:
+ self.server.invoke_successfully(igroup_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error provisioning igroup %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ self.add_initiators()
+
+ def delete_igroup(self):
+ """
+ Delete the igroup.
+ """
+ igroup_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'igroup-destroy', **{'initiator-group-name': self.parameters['name'],
+ 'force': 'true' if self.parameters['force_remove_initiator'] else 'false'})
+
+ try:
+ self.server.invoke_successfully(igroup_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting igroup %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_igroup(self):
+ """
+ Rename the igroup.
+ """
+ igroup_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'igroup-rename', **{'initiator-group-name': self.parameters['from_name'],
+ 'initiator-group-new-name': str(self.parameters['name'])})
+ try:
+ self.server.invoke_successfully(igroup_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming igroup %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ netapp_utils.ems_log_event("na_ontap_igroup", self.server)
+
+ def apply(self):
+ self.autosupport_log()
+ current = self.get_igroup(self.parameters['name'])
+ # rename and create are mutually exclusive
+ rename, cd_action, modify = None, None, None
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_igroup(self.parameters['from_name']), current)
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_igroup()
+ elif cd_action == 'create':
+ self.create_igroup()
+ elif cd_action == 'delete':
+ self.delete_igroup()
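+                # a change in initiators is applied as a full replacement:
+                # remove all current initiators, then add the desired ones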
+ if modify:
+ self.remove_initiators(current['initiators'])
+ self.add_initiators()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapIgroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py
new file mode 100644
index 00000000..0525cae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+''' This is an Ansible module for ONTAP, to manage initiators in an igroup
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_igroup_initiator
+short_description: NetApp ONTAP igroup initiator configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add/Remove initiators from an igroup
+
+options:
+ state:
+ description:
+      - Whether the specified initiators should exist or not in the igroup.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ names:
+ description:
+ - List of initiators to manage.
+ required: true
+ aliases:
+ - name
+ type: list
+ elements: str
+
+ initiator_group:
+ description:
+ - Name of the initiator group to which the initiator belongs.
+ required: true
+ type: str
+
+ force_remove:
+ description:
+ - Forcibly remove the initiators even if there are existing LUNs mapped to the initiator group.
+ type: bool
+ default: false
+ version_added: '20.1.0'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Add initiators to an igroup
+ na_ontap_igroup_initiator:
+ names: abc.test:def.com,def.test:efg.com
+ initiator_group: test_group
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Remove an initiator from an igroup
+ na_ontap_igroup_initiator:
+ state: absent
+ names: abc.test:def.com
+ initiator_group: test_group
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIgroupInitiator(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ names=dict(required=True, type='list', elements='str', aliases=['name']),
+ initiator_group=dict(required=True, type='str'),
+ force_remove=dict(required=False, type='bool', default=False),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_initiators(self):
+ """
+        Get the existing list of initiators from an igroup
+        :return: list of initiator names; empty if the igroup does not exist or has no initiators
+        :rtype: list
+ """
+ igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
+ attributes = dict(query={'initiator-group-info': {'initiator-group-name': self.parameters['initiator_group'],
+ 'vserver': self.parameters['vserver']}})
+ igroup_info.translate_struct(attributes)
+ result, current = None, []
+
+ try:
+ result = self.server.invoke_successfully(igroup_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['initiator_group'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
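+        # when the igroup exists, collect the names of the initiators currently assigned to it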
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ igroup_info = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
+ if igroup_info.get_child_by_name('initiators') is not None:
+ current = [initiator['initiator-name'] for initiator in igroup_info['initiators'].get_children()]
+ return current
+
+ def modify_initiator(self, initiator_name, zapi):
+ """
+ Add or remove an initiator to/from an igroup
+ """
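+        # 'force' is only honored for igroup-remove; it is always sent as 'false' for igroup-add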
+ options = {'initiator-group-name': self.parameters['initiator_group'],
+ 'initiator': initiator_name,
+ 'force': 'true' if zapi == 'igroup-remove' and self.parameters['force_remove'] else 'false'}
+ initiator_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+ try:
+ self.server.invoke_successfully(initiator_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (initiator_name,
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ netapp_utils.ems_log_event("na_ontap_igroup_initiator", self.server)
+
+ def apply(self):
+ self.autosupport_log()
+ initiators = self.get_initiators()
+ for initiator in self.parameters['names']:
+ present = None
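+            # normalize WWN-style initiator names so the comparison with the current list is consistent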
+ initiator = self.na_helper.sanitize_wwn(initiator)
+ if initiator in initiators:
+ present = True
+ cd_action = self.na_helper.get_cd_action(present, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.modify_initiator(initiator, 'igroup-add')
+ elif cd_action == 'delete':
+ self.modify_initiator(initiator, 'igroup-remove')
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapIgroupInitiator()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py
new file mode 100644
index 00000000..8336409d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py
@@ -0,0 +1,1787 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_info
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_info
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: NetApp information gatherer
+description:
+ - This module allows you to gather various information about ONTAP configuration
+version_added: 2.9.0
+requirements:
+ - netapp_lib
+options:
+ state:
+ type: str
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ vserver:
+ type: str
+ description:
+ - If present, 'vserver tunneling' will limit the output to the vserver scope.
+ - Note that not all subsets are supported on a vserver, and 'all' will trigger an error.
+ version_added: '19.11.0'
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected
+ to a given subset. Possible values for this argument include
+ "aggregate_info",
+ "aggr_efficiency_info",
+ "cifs_options_info",
+ "cifs_server_info",
+ "cifs_share_info",
+ "cifs_vserver_security_info",
+ "cluster_identity_info",
+ "cluster_image_info",
+ "cluster_log_forwarding_info",
+ "cluster_node_info",
+ "cluster_peer_info",
+ "cluster_switch_info",
+ "clock_info",
+ "disk_info",
+ "env_sensors_info",
+ "event_notification_destination_info",
+ "event_notification_info",
+ "export_policy_info",
+ "export_rule_info",
+ "fcp_adapter_info",
+ "fcp_alias_info",
+ "fcp_service_info",
+ "igroup_info",
+ "iscsi_service_info",
+ "job_schedule_cron_info",
+ "kerberos_realm_info",
+ "ldap_client",
+ "ldap_config",
+ "license_info",
+ "lun_info",
+ "lun_map_info",
+ "metrocluster_check_info",
+ "metrocluster_info",
+ "metrocluster_node_info",
+ "net_dev_discovery_info",
+ "net_dns_info",
+ "net_failover_group_info",
+ "net_firewall_info",
+ "net_ifgrp_info",
+ "net_interface_info",
+ "net_interface_service_policy_info",
+ "net_ipspaces_info",
+ "net_port_info",
+ "net_port_broadcast_domain_info",
+ "net_routes_info",
+ "net_vlan_info",
+ "nfs_info",
+ "ntfs_dacl_info",
+ "ntfs_sd_info",
+ "ntp_server_info",
+ "nvme_info",
+ "nvme_interface_info",
+ "nvme_namespace_info",
+ "nvme_subsystem_info",
+ "ontap_system_version",
+ "ontap_version",
+ "ontapi_version",
+ "qos_adaptive_policy_info",
+ "qos_policy_info",
+ "qtree_info",
+ "quota_report_info",
+ "role_info",
+ "security_key_manager_key_info",
+ "security_login_account_info",
+ "security_login_role_config_info",
+ "security_login_role_info",
+ "service_processor_info",
+ "service_processor_network_info",
+        "shelf_info",
+ "sis_info",
+ "sis_policy_info",
+ "snapmirror_info",
+ "snapmirror_destination_info",
+ "snapmirror_policy_info",
+ "snapshot_info",
+ "snapshot_policy_info",
+ "storage_failover_info",
+ "storage_bridge_info",
+ "subsys_health_info",
+ "sysconfig_info",
+ "sys_cluster_alerts",
+ "volume_info",
+ "volume_space_info",
+ "vscan_info",
+ "vscan_status_info",
+ "vscan_scanner_pool_info",
+ "vscan_connection_status_all_info",
+ "vscan_connection_extended_stats_info",
+ "vserver_info",
+ "vserver_login_banner_info",
+ "vserver_motd_info",
+ "vserver_nfs_info",
+ "vserver_peer_info",
+        You can specify a list of values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+ - nvme is supported with ONTAP 9.4 onwards.
+ - use "help" to get a list of supported information for your system.
+ default: "all"
+ max_records:
+ type: int
+ description:
+ - Maximum number of records returned in a single ZAPI call. Valid range is [1..2^32-1].
+ This parameter controls internal behavior of this module.
+ default: 1024
+ version_added: '20.2.0'
+ summary:
+ description:
+      - Boolean flag to control whether all attributes of the collected info are returned, or only the names.
+ - If true, only names are returned.
+ default: false
+ type: bool
+ version_added: '20.4.0'
+ volume_move_target_aggr_info:
+ description:
+ - Required options for volume_move_target_aggr_info
+ type: dict
+ version_added: '20.5.0'
+ suboptions:
+ volume_name:
+ description:
+ - Volume name to get target aggr info for
+ required: true
+ type: str
+ version_added: '20.5.0'
+ vserver:
+ description:
+ - vserver the Volume lives on
+ required: true
+ type: str
+ version_added: '20.5.0'
+ desired_attributes:
+ description:
+      - Advanced feature requiring an understanding of ZAPI internals.
+      - Allows requesting a specific attribute that is not returned by default, or limiting the returned attributes.
+ - A dictionary for the zapi desired-attributes element.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+      - e.g. I(<tag/>) is represented as I(tag:)
+ - Only a single subset can be called at a time if this option is set.
+      - It is the caller's responsibility to make sure key attributes are present in the right position.
+ - The module will error out if any key attribute is missing.
+ type: dict
+ version_added: '20.6.0'
+ query:
+ description:
+      - Advanced feature requiring an understanding of ZAPI internals.
+      - Allows specifying which objects to return.
+ - A dictionary for the zapi query element.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+      - e.g. I(<tag/>) is represented as I(tag:)
+ - Only a single subset can be called at a time if this option is set.
+ type: dict
+ version_added: '20.7.0'
+ use_native_zapi_tags:
+ description:
+ - By default, I(-) in the returned dictionary keys are translated to I(_).
+ - If set to true, the translation is disabled.
+ type: bool
+ default: false
+ version_added: '20.6.0'
+ continue_on_error:
+ description:
+ - By default, this module fails on the first error.
+      - This option allows providing a list of error types that will not fail the module.
+ - Errors in the list are reported in the output, under the related info element, as an "error" entry.
+      - Possible values are always, never, missing_vserver_api_error, rpc_error, key_error, other_error.
+ - missing_vserver_api_error - most likely the API is available at cluster level but not vserver level.
+ - rpc_error - some queries are failing because the node cannot reach another node in the cluster.
+ - key_error - a query is failing because the returned data does not contain an expected key.
+ - for key errors, make sure to report this in Slack. It may be a change in a new ONTAP version.
+ - other_error - anything not in the above list.
+      - always continues on any error, never fails on any error; neither can be combined with any other keyword.
+ type: list
+ elements: str
+ default: never
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info as Cluster Admin (Password Authentication)
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ register: ontap_info
+- debug:
+ msg: "{{ ontap_info.ontap_info }}"
+
+- name: Get NetApp version as Vserver admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "vsadmin"
+ vserver: trident_svm
+ password: "vsadmins_password"
+
+- name: run ontap info module using vserver tunneling and ignoring errors
+ na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ vserver: trident_svm
+ summary: true
+ continue_on_error:
+ - missing_vserver_api_error
+ - rpc_error
+
+- name: Limit Info Gathering to Aggregate Information as Cluster Admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+ register: ontap_info
+
+- name: Limit Info Gathering to Volume and Lun Information as Cluster Admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+ register: ontap_info
+
+- name: Gather all info except for volume and lun information as Cluster Admin
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+ register: ontap_info
+
+- name: Gather Volume move information for a specific volume
+ na_ontap_info:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: volume_move_target_aggr_info
+ volume_move_target_aggr_info:
+ volume_name: carchitest
+ vserver: ansible
+
+- name: run ontap info module for aggregate module, requesting specific fields
+ na_ontap_info:
+ # <<: *login
+ gather_subset: aggregate_info
+ desired_attributes:
+ aggr-attributes:
+ aggr-inode-attributes:
+ files-private-used:
+ aggr-raid-attributes:
+ aggregate-type:
+ use_native_zapi_tags: true
+ register: ontap
+- debug: var=ontap
+
+- name: run ontap info to get offline volumes with dp in the name
+ na_ontap_info:
+ # <<: *cert_login
+ gather_subset: volume_info
+ query:
+ volume-attributes:
+ volume-id-attributes:
+ name: '*dp*'
+ volume-state-attributes:
+ state: offline
+ desired_attributes:
+ volume-attributes:
+ volume-id-attributes:
+ name:
+ volume-state-attributes:
+ state:
+ register: ontap
+- debug: var=ontap
+'''
+
+RETURN = '''
+ontap_info:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_info": {
+ "aggregate_info": {...},
+ "cluster_identity_info": {...},
+ "cluster_image_info": {...},
+ "cluster_node_info": {...},
+ "igroup_info": {...},
+ "iscsi_service_info": {...},
+ "license_info": {...},
+ "lun_info": {...},
+ "metrocluster_check_info": {...},
+ "metrocluster_info": {...},
+ "metrocluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_interface_service_policy_info": {...},
+ "net_port_info": {...},
+ "ontap_system_version": {...},
+ "ontap_version": {...},
+ "ontapi_version": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...},
+ "qtree_info": {...},
+ "quota_report_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+        "snapmirror_info": {...},
+        "snapmirror_destination_info": {...},
+        "storage_bridge_info": {...},
+ "storage_failover_info": {...},
+ "volume_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "vscan_status_info": {...},
+ "vscan_scanner_pool_info": {...},
+ "vscan_connection_status_all_info": {...},
+ "vscan_connection_extended_stats_info": {...}
+ }'
+'''
+
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherInfo(object):
+ '''Class with gather info methods'''
+
+ def __init__(self, module, max_records):
+ self.module = module
+ self.max_records = str(max_records)
+ volume_move_target_aggr_info = module.params.get('volume_move_target_aggr_info', dict())
+ if volume_move_target_aggr_info is None:
+ volume_move_target_aggr_info = dict()
+ self.netapp_info = dict()
+ self.desired_attributes = module.params['desired_attributes']
+ self.query = module.params['query']
+ self.translate_keys = not module.params['use_native_zapi_tags']
+ self.warnings = list() # warnings will be added to the info results, if any
+ self.set_error_flags()
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.info_subsets = {
+ 'cluster_identity_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-identity-get',
+ 'attributes_list_tag': 'attributes',
+ 'attribute': 'cluster-identity-info',
+ 'key_fields': 'cluster-name',
+ },
+ 'min_version': '0',
+ },
+ 'cluster_image_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-image-get-iter',
+ 'attribute': 'cluster-image-info',
+ 'key_fields': 'node-id',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_log_forwarding_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-log-forward-get-iter',
+ 'attribute': 'cluster-log-forward-info',
+ 'key_fields': ('destination', 'port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'key_fields': 'node-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'key_fields': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_role_config_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-config-get-iter',
+ 'attribute': 'security-login-role-config-info',
+ 'key_fields': ('vserver', 'role-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_role_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-get-iter',
+ 'attribute': 'security-login-role-info',
+ 'key_fields': ('vserver', 'role-name', 'command-directory-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'key_fields': 'aggregate-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'key_fields': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'license_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'license-v2-list-info',
+ 'attributes_list_tag': None,
+ 'attribute': 'licenses',
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'key_fields': ('vserver', 'path'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_check_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-check-get-iter',
+ 'attribute': 'metrocluster-check-info',
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-get',
+ 'attribute': 'metrocluster-info',
+ 'attributes_list_tag': 'attributes',
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-node-get-iter',
+ 'attribute': 'metrocluster-node-info',
+ 'key_fields': ('cluster-name', 'node-name'),
+ },
+ 'min_version': '0',
+ },
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'key_fields': 'vserver-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'key_fields': 'interface-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_service_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-service-policy-get-iter',
+ 'attribute': 'net-interface-service-policy-info',
+ 'key_fields': ('vserver', 'policy'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '150',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'key_fields': ('node', 'port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'key_fields': ('node', 'key-id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'key_fields': 'node',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'key_fields': 'vserver-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_system_version': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-get-version',
+ 'attributes_list_tag': None,
+ },
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontapi_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'clock_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'clock-get-clock',
+ 'attributes_list_tag': None,
+ },
+ 'min_version': '0'
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'key_fields': 'node',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'key_fields': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'iscsi_service_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'iscsi-service-get-iter',
+ 'attribute': 'iscsi-service-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'key_fields': 'policy-group',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'qtree_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qtree-list-iter',
+ 'attribute': 'qtree-info',
+ 'key_fields': ('vserver', 'volume', 'id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'quota_report_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'quota-report-iter',
+ 'attribute': 'quota',
+ 'key_fields': ('vserver', 'volume', 'tree', 'quota-type', 'quota-target'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_status_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-status-get-iter',
+ 'attribute': 'vscan-status-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_scanner_pool_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-scanner-pool-get-iter',
+ 'attribute': 'vscan-scanner-pool-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_connection_status_all_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-connection-status-all-get-iter',
+ 'attribute': 'vscan-connection-status-all-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_connection_extended_stats_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-connection-extended-stats-get-iter',
+ 'attribute': 'vscan-connection-extended-stats-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapshot_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapshot-get-iter',
+ 'attribute': 'snapshot-info',
+ 'key_fields': ('vserver', 'volume', 'name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'storage_bridge_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-bridge-get-iter',
+ 'attribute': 'storage-bridge-info',
+ 'key_fields': 'name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'key_fields': 'policy-group',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'key_fields': 'subsystem',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'key_fields': 'path',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+
+ # Alpha Order
+
+ 'aggr_efficiency_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-efficiency-get-iter',
+ 'attribute': 'aggr-efficiency-info',
+ 'key_fields': ('node', 'aggregate'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'cifs_options_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-options-get-iter',
+ 'attribute': 'cifs-options',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_server_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-server-get-iter',
+ 'attribute': 'cifs-server-config',
+ # preferred key is <vserver>:<domain>:<cifs-server>
+ # alternate key is <vserver>:<domain-workgroup>:<cifs-server>
+ 'key_fields': ('vserver', ('domain', 'domain-workgroup'), 'cifs-server'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_share_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-share-get-iter',
+ 'attribute': 'cifs-share',
+ 'key_fields': ('share-name', 'path', 'cifs-server'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_vserver_security_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-security-get-iter',
+ 'attribute': 'cifs-security',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_peer_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-peer-get-iter',
+ 'attribute': 'cluster-peer-info',
+ 'key_fields': ('cluster-name', 'remote-cluster-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_switch_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-switch-get-iter',
+ 'attribute': 'cluster-switch-info',
+ 'key_fields': ('device', 'model', 'serial-number'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '160',
+ },
+ 'disk_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-disk-get-iter',
+ 'attribute': 'storage-disk-info',
+ 'key_fields': ('disk-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'env_sensors_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'environment-sensors-get-iter',
+ 'attribute': 'environment-sensors-info',
+ 'key_fields': ('node-name', 'sensor-name'),
+ 'query': {'max-records': self.max_records},
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'event_notification_destination_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ems-event-notification-destination-get-iter',
+ 'attribute': 'event-notification-destination-info',
+ 'key_fields': ('name', 'type'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'event_notification_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ems-event-notification-get-iter',
+ 'attribute': 'event-notification',
+ 'key_fields': ('id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'export_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'export-policy-get-iter',
+ 'attribute': 'export-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'export_rule_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'export-rule-get-iter',
+ 'attribute': 'export-rule-info',
+ 'key_fields': ('vserver-name', 'policy-name', 'rule-index'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_adapter_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ucm-adapter-get-iter',
+ 'attribute': 'uc-adapter-info',
+ 'key_fields': ('adapter-name', 'node-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_alias_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'fcp-wwpnalias-get-iter',
+ 'attribute': 'aliases-info',
+ 'key_fields': ('aliases-alias', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_service_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'fcp-service-get-iter',
+ 'attribute': 'fcp-service-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'job_schedule_cron_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'job-schedule-cron-get-iter',
+ 'attribute': 'job-schedule-cron-info',
+ 'key_fields': ('job-schedule-name', 'job-schedule-cluster'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'kerberos_realm_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'kerberos-realm-get-iter',
+ 'attribute': 'kerberos-realm',
+ 'key_fields': ('vserver-name', 'realm'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ldap_client': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ldap-client-get-iter',
+ 'attribute': 'ldap-client',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ldap_config': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ldap-config-get-iter',
+ 'attribute': 'ldap-config',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'lun_map_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-map-get-iter',
+ 'attribute': 'lun-map-info',
+ 'key_fields': ('initiator-group', 'lun-id', 'node', 'path', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_dev_discovery_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-device-discovery-get-iter',
+ 'attribute': 'net-device-discovery-info',
+ 'key_fields': ('port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_failover_group_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-failover-group-get-iter',
+ 'attribute': 'net-failover-group-info',
+ 'key_fields': ('vserver', 'failover-group'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_firewall_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-firewall-policy-get-iter',
+ 'attribute': 'net-firewall-policy-info',
+ 'key_fields': ('policy', 'vserver', 'service'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_ipspaces_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-ipspaces-get-iter',
+ 'attribute': 'net-ipspaces-info',
+ 'key_fields': ('ipspace'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_broadcast_domain_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-broadcast-domain-get-iter',
+ 'attribute': 'net-port-broadcast-domain-info',
+ 'key_fields': ('broadcast-domain', 'ipspace'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_routes_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-routes-get-iter',
+ 'attribute': 'net-vs-routes-info',
+ 'key_fields': ('vserver', 'destination', 'gateway'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_vlan_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-vlan-get-iter',
+ 'attribute': 'vlan-info',
+ 'key_fields': ('interface-name', 'node'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntfs_dacl_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'file-directory-security-ntfs-dacl-get-iter',
+ 'attribute': 'file-directory-security-ntfs-dacl',
+ 'key_fields': ('vserver', 'ntfs-sd', 'account', 'access-type'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntfs_sd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'file-directory-security-ntfs-get-iter',
+ 'attribute': 'file-directory-security-ntfs',
+ 'key_fields': ('vserver', 'ntfs-sd'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntp_server_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ntp-server-get-iter',
+ 'attribute': 'ntp-server-info',
+ 'key_fields': ('server-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'role_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-get-iter',
+ 'attribute': 'security-login-role-info',
+ 'key_fields': ('vserver', 'role-name', 'access-level', 'command-directory-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'service_processor_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'service-processor-get-iter',
+ 'attribute': 'service-processor-info',
+ 'key_fields': ('node'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'service_processor_network_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'service-processor-network-get-iter',
+ 'attribute': 'service-processor-network-info',
+                # don't use key_fields, as we cannot build a key with optional key_fields
+ # without a key, we'll get a list of dictionaries
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'shelf_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-shelf-info-get-iter',
+ 'attribute': 'storage-shelf-info',
+ 'key_fields': ('shelf-id', 'serial-number'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sis_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'sis-get-iter',
+ 'attribute': 'sis-status-info',
+ 'key_fields': 'path',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sis_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'sis-policy-get-iter',
+ 'attribute': 'sis-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapmirror_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-get-iter',
+ 'attribute': 'snapmirror-info',
+ 'key_fields': 'destination-location',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'snapmirror_destination_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-get-destination-iter',
+ 'attribute': 'snapmirror-destination-info',
+ 'key_fields': 'destination-location',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'snapmirror_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-policy-get-iter',
+ 'attribute': 'snapmirror-policy-info',
+ 'key_fields': ('vserver-name', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapshot_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapshot-policy-get-iter',
+ 'attribute': 'snapshot-policy-info',
+ 'key_fields': ('vserver-name', 'policy'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'subsys_health_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'diagnosis-subsystem-config-get-iter',
+ 'attribute': 'diagnosis-subsystem-config-info',
+ 'key_fields': 'subsystem',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sys_cluster_alerts': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'diagnosis-alert-get-iter',
+ 'attribute': 'diagnosis-alert-info',
+ 'key_fields': ('node', 'alerting-resource'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sysconfig_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-get-node-info-iter',
+ 'attribute': 'system-info',
+ 'key_fields': ('system-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'volume_move_target_aggr_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-move-target-aggr-get-iter',
+ 'attribute': 'volume-move-target-aggr-info',
+ 'query': {'max-records': self.max_records,
+ 'volume-name': volume_move_target_aggr_info.get('volume_name', None),
+ 'vserver': volume_move_target_aggr_info.get('vserver', None)},
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'volume_space_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-space-get-iter',
+ 'attribute': 'space-info',
+ 'key_fields': ('vserver', 'volume'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-status-get-iter',
+ 'attribute': 'vscan-status-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_peer_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-peer-get-iter',
+ 'attribute': 'vserver-peer-info',
+ 'key_fields': ('vserver', 'remote-vserver-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ }
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ # use vserver tunneling if vserver is present (not None)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=module.params['vserver'])
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=True)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
+ def call_api(self, call, attributes_list_tag='attributes-list', query=None, fail_on_error=True):
+ '''Main method to run an API call'''
+
+ api_call = netapp_utils.zapi.NaElement(call)
+ initial_result = None
+ result = None
+
+ if query:
+ for key, val in query.items():
+ # Can val be nested?
+ api_call.add_new_child(key, val)
+
+ if self.desired_attributes is not None:
+ api_call.translate_struct(self.desired_attributes)
+ if self.query is not None:
+ api_call.translate_struct(self.query)
+ try:
+ initial_result = self.server.invoke_successfully(api_call, enable_tunneling=True)
+ next_tag = initial_result.get_child_by_name('next-tag')
+ result = copy.copy(initial_result)
+
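+            # iter ZAPIs return paginated results; follow 'next-tag' and merge each page
+            # into the first result's attributes list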
+ while next_tag:
+ next_tag_call = netapp_utils.zapi.NaElement(call)
+ if query:
+ for key, val in query.items():
+ next_tag_call.add_new_child(key, val)
+
+ next_tag_call.add_new_child("tag", next_tag.get_content(), True)
+ next_result = self.server.invoke_successfully(next_tag_call, enable_tunneling=True)
+
+ next_tag = next_result.get_child_by_name('next-tag')
+ if attributes_list_tag is None:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api_call.to_string(), "'next-tag' is not expected for this API"))
+
+ result_attr = result.get_child_by_name(attributes_list_tag)
+ new_records = next_result.get_child_by_name(attributes_list_tag)
+ if new_records:
+ for record in new_records.get_children():
+ result_attr.add_child_elem(record)
+
+ return result, None
+
+ except netapp_utils.zapi.NaApiError as error:
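+            # security-key-manager-key-get-iter is allowed to fail silently; return whatever was collected so far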
+ if call in ['security-key-manager-key-get-iter']:
+ return result, None
+ kind, error_message = netapp_utils.classify_zapi_exception(error)
+ if kind == 'missing_vserver_api_error':
+ # for missing_vserver_api_error, the API is already in error_message
+ error_message = "Error invalid API. %s" % error_message
+ else:
+ error_message = "Error calling API %s: %s" % (call, error_message)
+ if self.error_flags[kind] and fail_on_error:
+ self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+ return None, error_message
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.info_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
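+            # net_port_info keys are built as <node>:<port>, so split on ':' to get the ifgrp query fields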
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', key_fields=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query,
+ attributes_list_tag='attributes')
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+ def get_generic_get_iter(self, call, attribute=None, key_fields=None, query=None, attributes_list_tag='attributes-list', fail_on_error=True):
+ '''Method to run a generic get-iter call'''
+
+ generic_call, error = self.call_api(call, attributes_list_tag, query, fail_on_error=fail_on_error)
+
+ if error is not None:
+ return {'error': error}
+
+ if generic_call is None:
+ return None
+
+ if attributes_list_tag is None:
+ attributes_list = generic_call
+ else:
+ attributes_list = generic_call.get_child_by_name(attributes_list_tag)
+
+ if attributes_list is None:
+ return None
+
+ if key_fields is None:
+ out = []
+ else:
+ out = {}
+
+ iteration = 0
+ for child in attributes_list.get_children():
+ iteration += 1
+ dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+ if attribute is not None:
+ dic = dic[attribute]
+
+ info = json.loads(json.dumps(dic))
+ if self.translate_keys:
+ info = convert_keys(info)
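+            # build a unique dictionary key from a single field (str) or from several fields joined with ':' (tuple)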
+ if isinstance(key_fields, str):
+ try:
+ unique_key = _finditem(dic, key_fields)
+ except KeyError as exc:
+ error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info))
+ if self.error_flags['key_error']:
+ self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+ unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0])
+ elif isinstance(key_fields, tuple):
+ try:
+ unique_key = ':'.join([_finditem(dic, el) for el in key_fields])
+ except KeyError as exc:
+ error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info))
+ if self.error_flags['key_error']:
+ self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+ unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0])
+ else:
+ unique_key = None
+ if unique_key is not None:
+ out = out.copy()
+ out.update({unique_key: info})
+ else:
+ out.append(info)
+
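+        # for single-object ZAPIs (no attributes-list tag and no key), flatten the result:
+        # a single element is returned as-is, and multiple dicts are merged when keys do not collide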
+ if attributes_list_tag is None and key_fields is None:
+ if len(out) == 1:
+ # flatten the list as only 1 element is expected
+ out = out[0]
+ elif len(out) > 1:
+ # aggregate a list of dictionaries into a single dict
+ # make sure we only have dicts and no key duplication
+ dic = dict()
+ key_count = 0
+ for item in out:
+ if not isinstance(item, dict):
+ # abort if we don't see a dict
+ key_count = -1
+ break
+ dic.update(item)
+ key_count += len(item)
+ if key_count == len(dic):
+ # no duplicates!
+ out = dic
+
+ return out
+
+ def send_ems_event(self):
+ ''' use vserver if available, or cluster vserver '''
+ if self.module.params['vserver']:
+ server = self.server
+ else:
+ results = netapp_utils.get_cserver(self.server)
+ if results is None:
+ # most likely we're on a vserver interface already
+ server = self.server
+ else:
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_info", server)
+
+ def get_all(self, gather_subset):
+ '''Method to get all subsets'''
+
+ self.send_ems_event()
+
+ self.netapp_info['ontapi_version'] = self.ontapi()
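+        # ontap_version mirrors ontapi_version and is kept for backward compatibility (a deprecation warning is issued below)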
+ self.netapp_info['ontap_version'] = self.netapp_info['ontapi_version']
+
+ run_subset = self.get_subset(gather_subset, self.netapp_info['ontapi_version'])
+ if 'ontap_version' in gather_subset:
+ if netapp_utils.has_feature(self.module, 'deprecation_warning'):
+ self.netapp_info['deprecation_warning'] = 'ontap_version is deprecated, please use ontapi_version'
+ if 'help' in gather_subset:
+ self.netapp_info['help'] = sorted(run_subset)
+ else:
+ if self.desired_attributes is not None:
+ if len(run_subset) > 1:
+ self.module.fail_json(msg="desired_attributes option is only supported with a single subset")
+ self.sanitize_desired_attributes()
+ if self.query is not None:
+ if len(run_subset) > 1:
+ self.module.fail_json(msg="query option is only supported with a single subset")
+ self.sanitize_query()
+ for subset in run_subset:
+ call = self.info_subsets[subset]
+ self.netapp_info[subset] = call['method'](**call['kwargs'])
+
+ if self.warnings:
+ self.netapp_info['module_warnings'] = self.warnings
+
+ return self.netapp_info
+
+ def get_subset(self, gather_subset, version):
+ '''Method to get a single subset'''
+
+ runable_subsets = set()
+ exclude_subsets = set()
+ usable_subsets = [key for key in self.info_subsets if version >= self.info_subsets[key]['min_version']]
+ if 'help' in gather_subset:
+ return usable_subsets
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(usable_subsets)
+ return runable_subsets
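+            # a leading '!' marks a subset to exclude rather than collect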
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ return set()
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in usable_subsets:
+ if subset not in self.info_subsets.keys():
+ self.module.fail_json(msg='Bad subset: %s' % subset)
+ self.module.fail_json(msg='Remote system at version %s does not support %s' %
+ (version, subset))
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(usable_subsets)
+
+ runable_subsets.difference_update(exclude_subsets)
+
+ return runable_subsets
+
+ def get_summary(self, ontap_info):
+ for info in ontap_info:
+ if '_info' in info and ontap_info[info] is not None and isinstance(ontap_info[info], dict):
+ # don't summarize errors
+ if 'error' not in ontap_info[info]:
+ ontap_info[info] = ontap_info[info].keys()
+ return ontap_info
+
+ def sanitize_desired_attributes(self):
+        ''' add a top-level 'desired-attributes' key if absent
+            warn on _ in keys, as ZAPI most likely does not accept them
+ '''
+ da_key = 'desired-attributes'
+ if da_key not in self.desired_attributes:
+ desired_attributes = dict()
+ desired_attributes[da_key] = self.desired_attributes
+ self.desired_attributes = desired_attributes
+ self.check_for___in_keys(self.desired_attributes)
+
+ def sanitize_query(self):
+        ''' add a top-level 'query' key if absent
+            warn on _ in keys, as ZAPI most likely does not accept them
+ '''
+ key = 'query'
+ if key not in self.query:
+ query = dict()
+ query[key] = self.query
+ self.query = query
+ self.check_for___in_keys(self.query)
+
+ def check_for___in_keys(self, d_param):
+ '''Method to warn on underscore in a ZAPI tag'''
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ self.check_for___in_keys(val)
+ if '_' in key:
+ self.warnings.append("Underscore in ZAPI tag: %s, do you mean '-'?" % key)
+ elif isinstance(d_param, list):
+ for val in d_param:
+ self.check_for___in_keys(val)
+
+ def set_error_flags(self):
+ error_flags = self.module.params['continue_on_error']
+ generic_flags = ('always', 'never')
+ if len(error_flags) > 1:
+ for key in generic_flags:
+ if key in error_flags:
+ self.module.fail_json(msg="%s needs to be the only keyword in 'continue_on_error' option." % key)
+ specific_flags = ('rpc_error', 'missing_vserver_api_error', 'key_error', 'other_error')
+ for key in error_flags:
+ if key not in generic_flags and key not in specific_flags:
+ self.module.fail_json(msg="%s is not a valid keyword in 'continue_on_error' option." % key)
+ self.error_flags = dict()
+ for flag in specific_flags:
+ self.error_flags[flag] = True
+ for key in error_flags:
+ if key == 'always' or key == flag:
+ self.error_flags[flag] = False
+
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key in obj:
+ if obj[key] is None:
+ return "None"
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, keys):
+ ''' if keys is a string, use it as a key
+ if keys is a tuple, stop on the first valid key
+ if no valid key is found, raise a KeyError '''
+
+ value = None
+ if isinstance(keys, str):
+ value = __finditem(obj, keys)
+ elif isinstance(keys, tuple):
+ for key in keys:
+ value = __finditem(obj, key)
+ if value is not None:
+ break
+ if value is not None:
+ return value
+ raise KeyError(str(keys))
+
+
+def convert_keys(d_param):
+ '''Method to convert hyphen to underscore'''
+
+ if isinstance(d_param, dict):
+ out = {}
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ return out
+ elif isinstance(d_param, list):
+ return [convert_keys(val) for val in d_param]
+ return d_param
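+
+# Illustrative example (comments only): convert_keys recursively rewrites
+# ZAPI-style keys into Ansible-style keys, for instance
+#   convert_keys({'net-interface-info': [{'home-node': 'node1'}]})
+#   returns {'net_interface_info': [{'home_node': 'node1'}]}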
+
+
+def main():
+ '''Execute action'''
+
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='info', choices=['info']),
+ gather_subset=dict(default=['all'], type='list', elements='str'),
+ vserver=dict(type='str', required=False),
+ max_records=dict(type='int', default=1024, required=False),
+ summary=dict(type='bool', default=False, required=False),
+ volume_move_target_aggr_info=dict(
+ type="dict",
+ required=False,
+ options=dict(
+ volume_name=dict(type='str', required=True),
+ vserver=dict(type='str', required=True)
+ )
+ ),
+ desired_attributes=dict(type='dict', required=False),
+ use_native_zapi_tags=dict(type='bool', required=False, default=False),
+ continue_on_error=dict(type='list', required=False, elements='str', default=['never']),
+ query=dict(type='dict', required=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_XMLTODICT:
+ module.fail_json(msg="xmltodict missing")
+
+ if not HAS_JSON:
+ module.fail_json(msg="json missing")
+
+ state = module.params['state']
+ gather_subset = module.params['gather_subset']
+ summary = module.params['summary']
+ if gather_subset is None:
+ gather_subset = ['all']
+ max_records = module.params['max_records']
+ gf_obj = NetAppONTAPGatherInfo(module, max_records)
+ gf_all = gf_obj.get_all(gather_subset)
+ if summary:
+ gf_all = gf_obj.get_summary(gf_all)
+ result = {'state': state, 'changed': False}
+ module.exit_json(ontap_info=gf_all, **result)
+
+
+if __name__ == '__main__':
+ main()
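
The continue_on_error handling in set_error_flags() above can be summarised by the following minimal sketch; it is illustrative only, raising ValueError where the module would call fail_json, and returning True for the error classes that should still abort the module:

GENERIC_FLAGS = ('always', 'never')
SPECIFIC_FLAGS = ('rpc_error', 'missing_vserver_api_error', 'key_error', 'other_error')

def parse_continue_on_error(error_flags):
    '''Return a {flag: fail_on_error} dict for each specific error class.'''
    if len(error_flags) > 1:
        for key in GENERIC_FLAGS:
            if key in error_flags:
                raise ValueError("%s needs to be the only keyword in 'continue_on_error' option." % key)
    for key in error_flags:
        if key not in GENERIC_FLAGS and key not in SPECIFIC_FLAGS:
            raise ValueError("%s is not a valid keyword in 'continue_on_error' option." % key)
    flags = {}
    for flag in SPECIFIC_FLAGS:
        # True means the module fails on that error class, False means it continues
        flags[flag] = not ('always' in error_flags or flag in error_flags)
    return flags

# parse_continue_on_error(['never'])     -> every flag True (always fail)
# parse_continue_on_error(['always'])    -> every flag False (never fail)
# parse_continue_on_error(['rpc_error']) -> only rpc_error is False
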
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py
new file mode 100644
index 00000000..9896d6d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py
@@ -0,0 +1,613 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_interface
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_interface
+short_description: NetApp ONTAP LIF configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Creating, deleting and modifying LIFs.
+
+options:
+ state:
+ description:
+ - Whether the specified interface should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ interface_name:
+ description:
+ - Specifies the logical interface (LIF) name.
+ required: true
+ type: str
+
+ home_node:
+ description:
+ - Specifies the LIF's home node.
+ - By default, the first node of the cluster is used as the home node.
+ type: str
+
+ current_node:
+ description:
+ - Specifies the LIF's current node.
+ - By default, this is home_node
+ type: str
+
+ home_port:
+ description:
+ - Specifies the LIF's home port.
+ - Required when C(state=present)
+ type: str
+
+ current_port:
+ description:
+ - Specifies the LIF's current port.
+ type: str
+
+ role:
+ description:
+ - Specifies the role of the LIF.
+ - When setting role as "intercluster" or "cluster", setting protocol is not supported.
+ - When creating a "cluster" role, the node name will appear as a prefix in the LIF name.
+ - For example, if the specified name is clif and node name is node1, the LIF name appears in the ONTAP as node1_clif.
+ - Possible values are 'undef', 'cluster', 'data', 'node-mgmt', 'intercluster', 'cluster-mgmt'.
+ - Required when C(state=present) unless service_policy is present and ONTAP version is 9.8 or later.
+ type: str
+
+ address:
+ description:
+ - Specifies the LIF's IP address.
+ - Required when C(state=present) and is_ipv4_link_local is false and subnet_name is not set.
+ type: str
+
+ netmask:
+ description:
+ - Specifies the LIF's netmask.
+ - Required when C(state=present) and is_ipv4_link_local is false and subnet_name is not set.
+ type: str
+
+ is_ipv4_link_local:
+ description:
+ - Specifies whether the LIF is to acquire an IPv4 link-local address.
+ - The use case for this is when creating Cluster LIFs, to allow auto assignment of an IPv4 link-local address.
+ version_added: '20.1.0'
+ type: bool
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ firewall_policy:
+ description:
+ - Specifies the firewall policy for the LIF.
+ type: str
+
+ failover_policy:
+ description:
+ - Specifies the failover policy for the LIF.
+ choices: ['disabled', 'system-defined', 'local-only', 'sfo-partner-only', 'broadcast-domain-wide']
+ type: str
+
+ failover_group:
+ description:
+ - Specifies the failover group for the LIF.
+ version_added: '20.1.0'
+ type: str
+
+ subnet_name:
+ description:
+ - Subnet where the interface address is allocated from.
+ - If the option is not used, the IP address will need to be provided by the administrator during configuration.
+ version_added: 2.8.0
+ type: str
+
+ admin_status:
+ choices: ['up', 'down']
+ description:
+ - Specifies the administrative status of the LIF.
+ type: str
+
+ is_auto_revert:
+ description:
+ - If true, the data LIF will revert to its home node under certain circumstances such as startup,
+ - and the load balancing migration capability is disabled automatically.
+ type: bool
+
+ force_subnet_association:
+ description:
+ - Set this to true to acquire the address from the named subnet and assign the subnet to the LIF.
+ version_added: 2.9.0
+ type: bool
+
+ protocols:
+ description:
+ - Specifies the list of data protocols configured on the LIF. By default, the values in this element are nfs, cifs and fcache.
+ - Other supported protocols are iscsi and fcp. A LIF can be configured to not support any data protocols by specifying 'none'.
+ - Protocol values of none, iscsi, fc-nvme or fcp can't be combined with any other data protocol(s).
+ - address, netmask and firewall_policy parameters are not supported for 'fc-nvme' option.
+ type: list
+ elements: str
+
+ dns_domain_name:
+ description:
+ - Specifies the unique, fully qualified domain name of the DNS zone of this LIF.
+ version_added: 2.9.0
+ type: str
+
+ listen_for_dns_query:
+ description:
+ - If True, this IP address will listen for DNS queries for the dnszone specified.
+ version_added: 2.9.0
+ type: bool
+
+ is_dns_update_enabled:
+ description:
+ - Specifies if DNS update is enabled for this LIF. Dynamic updates will be sent for this LIF if updates are enabled at Vserver level.
+ version_added: 2.9.0
+ type: bool
+
+ service_policy:
+ description:
+ - Starting with ONTAP 9.5, you can configure LIF service policies to identify a single service or a list of services that will use a LIF.
+ - In ONTAP 9.5, you can assign service policies only for LIFs in the admin SVM.
+ - In ONTAP 9.6, you can additionally assign service policies for LIFs in the data SVMs.
+ - When you specify a service policy for a LIF, you need not specify the data protocol and role for the LIF.
+ - NOTE that role is still required because of a ZAPI issue. This limitation is removed in ONTAP 9.8.
+ - Creating LIFs by specifying the role and data protocols is also supported.
+ version_added: '20.4.0'
+ type: str
+'''
+
+EXAMPLES = '''
+ - name: Create interface
+ na_ontap_interface:
+ state: present
+ interface_name: data2
+ home_port: e0d
+ home_node: laurentn-vsim1
+ role: data
+ protocols:
+ - nfs
+ - cifs
+ admin_status: up
+ failover_policy: local-only
+ firewall_policy: mgmt
+ is_auto_revert: true
+ address: 10.10.10.10
+ netmask: 255.255.255.0
+ force_subnet_association: false
+ dns_domain_name: test.com
+ listen_for_dns_query: true
+ is_dns_update_enabled: true
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create cluster interface
+ na_ontap_interface:
+ state: present
+ interface_name: cluster_lif
+ home_port: e0a
+ home_node: cluster1-01
+ role: cluster
+ admin_status: up
+ is_auto_revert: true
+ is_ipv4_link_local: true
+ vserver: Cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Migrate an interface
+ na_ontap_interface:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ vserver: ansible
+ https: true
+ validate_certs: false
+ state: present
+ interface_name: carchi_interface3
+ home_port: e0d
+ home_node: ansdev-stor-1
+ current_node: ansdev-stor-2
+ role: data
+ failover_policy: local-only
+ firewall_policy: mgmt
+ is_auto_revert: true
+ address: 10.10.10.12
+ netmask: 255.255.255.0
+ force_subnet_association: false
+ admin_status: up
+
+ - name: Delete interface
+ na_ontap_interface:
+ state: absent
+ interface_name: data2
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapInterface(object):
+ ''' object to describe interface info '''
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=[
+ 'present', 'absent'], default='present'),
+ interface_name=dict(required=True, type='str'),
+ home_node=dict(required=False, type='str', default=None),
+ current_node=dict(required=False, type='str'),
+ home_port=dict(required=False, type='str'),
+ current_port=dict(required=False, type='str'),
+ role=dict(required=False, type='str'),
+ is_ipv4_link_local=dict(required=False, type='bool', default=None),
+ address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ firewall_policy=dict(required=False, type='str', default=None),
+ failover_policy=dict(required=False, type='str', default=None,
+ choices=['disabled', 'system-defined',
+ 'local-only', 'sfo-partner-only', 'broadcast-domain-wide']),
+ failover_group=dict(required=False, type='str'),
+ admin_status=dict(required=False, choices=['up', 'down']),
+ subnet_name=dict(required=False, type='str'),
+ is_auto_revert=dict(required=False, type='bool', default=None),
+ protocols=dict(required=False, type='list', elements='str'),
+ force_subnet_association=dict(required=False, type='bool', default=None),
+ dns_domain_name=dict(required=False, type='str'),
+ listen_for_dns_query=dict(required=False, type='bool'),
+ is_dns_update_enabled=dict(required=False, type='bool'),
+ service_policy=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ mutually_exclusive=[
+ ['subnet_name', 'address'],
+ ['subnet_name', 'netmask'],
+ ['is_ipv4_link_local', 'address'],
+ ['is_ipv4_link_local', 'netmask'],
+ ['is_ipv4_link_local', 'subnet_name']
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_interface(self):
+ """
+ Return details about the interface
+ :param:
+ name : Name of the interface
+
+ :return: Details about the interface. None if not found.
+ :rtype: dict
+ """
+ interface_info = netapp_utils.zapi.NaElement('net-interface-get-iter')
+ interface_attributes = netapp_utils.zapi.NaElement('net-interface-info')
+ interface_attributes.add_new_child('interface-name', self.parameters['interface_name'])
+ interface_attributes.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(interface_attributes)
+ interface_info.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(interface_info, True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error fetching interface details for %s: %s' %
+ (self.parameters['interface_name'], to_native(exc)),
+ exception=traceback.format_exc())
+ return_value = None
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ interface_attributes = result.get_child_by_name('attributes-list'). \
+ get_child_by_name('net-interface-info')
+ return_value = {
+ 'interface_name': self.parameters['interface_name'],
+ 'admin_status': interface_attributes['administrative-status'],
+ 'home_port': interface_attributes['home-port'],
+ 'home_node': interface_attributes['home-node'],
+ 'failover_policy': interface_attributes['failover-policy'].replace('_', '-'),
+ }
+ if interface_attributes.get_child_by_name('is-auto-revert'):
+ return_value['is_auto_revert'] = True if interface_attributes['is-auto-revert'] == 'true' else False
+ if interface_attributes.get_child_by_name('failover-group'):
+ return_value['failover_group'] = interface_attributes['failover-group']
+ if interface_attributes.get_child_by_name('address'):
+ return_value['address'] = interface_attributes['address']
+ if interface_attributes.get_child_by_name('netmask'):
+ return_value['netmask'] = interface_attributes['netmask']
+ if interface_attributes.get_child_by_name('firewall-policy'):
+ return_value['firewall_policy'] = interface_attributes['firewall-policy']
+ if interface_attributes.get_child_content('dns-domain-name') != 'none':
+ return_value['dns_domain_name'] = interface_attributes['dns-domain-name']
+ else:
+ return_value['dns_domain_name'] = None
+ if interface_attributes.get_child_by_name('listen-for-dns-query'):
+ return_value['listen_for_dns_query'] = self.na_helper.get_value_for_bool(True, interface_attributes[
+ 'listen-for-dns-query'])
+ if interface_attributes.get_child_by_name('is-dns-update-enabled'):
+ return_value['is_dns_update_enabled'] = self.na_helper.get_value_for_bool(True, interface_attributes[
+ 'is-dns-update-enabled'])
+ if interface_attributes.get_child_by_name('service-policy'):
+ return_value['service_policy'] = interface_attributes['service-policy']
+ if interface_attributes.get_child_by_name('current-node'):
+ return_value['current_node'] = interface_attributes['current-node']
+ if interface_attributes.get_child_by_name('current-port'):
+ return_value['current_port'] = interface_attributes['current-port']
+ return return_value
+
+ @staticmethod
+ def set_options(options, parameters):
+ """ set attributes for create or modify """
+ if parameters.get('role') is not None:
+ options['role'] = parameters['role']
+ if parameters.get('home_node') is not None:
+ options['home-node'] = parameters['home_node']
+ if parameters.get('home_port') is not None:
+ options['home-port'] = parameters['home_port']
+ if parameters.get('subnet_name') is not None:
+ options['subnet-name'] = parameters['subnet_name']
+ if parameters.get('address') is not None:
+ options['address'] = parameters['address']
+ if parameters.get('netmask') is not None:
+ options['netmask'] = parameters['netmask']
+ if parameters.get('failover_policy') is not None:
+ options['failover-policy'] = parameters['failover_policy']
+ if parameters.get('failover_group') is not None:
+ options['failover-group'] = parameters['failover_group']
+ if parameters.get('firewall_policy') is not None:
+ options['firewall-policy'] = parameters['firewall_policy']
+ if parameters.get('is_auto_revert') is not None:
+ options['is-auto-revert'] = 'true' if parameters['is_auto_revert'] is True else 'false'
+ if parameters.get('admin_status') is not None:
+ options['administrative-status'] = parameters['admin_status']
+ if parameters.get('force_subnet_association') is not None:
+ options['force-subnet-association'] = 'true' if parameters['force_subnet_association'] else 'false'
+ if parameters.get('dns_domain_name') is not None:
+ options['dns-domain-name'] = parameters['dns_domain_name']
+ if parameters.get('listen_for_dns_query') is not None:
+ options['listen-for-dns-query'] = str(parameters['listen_for_dns_query'])
+ if parameters.get('is_dns_update_enabled') is not None:
+ options['is-dns-update-enabled'] = str(parameters['is_dns_update_enabled'])
+ if parameters.get('is_ipv4_link_local') is not None:
+ options['is-ipv4-link-local'] = 'true' if parameters['is_ipv4_link_local'] else 'false'
+ if parameters.get('service_policy') is not None:
+ options['service-policy'] = parameters['service_policy']
+
+ def set_protocol_option(self, required_keys):
+ """ set protocols for create """
+ if self.parameters.get('protocols') is not None:
+ data_protocols_obj = netapp_utils.zapi.NaElement('data-protocols')
+ for protocol in self.parameters.get('protocols'):
+ if protocol.lower() in ['fc-nvme', 'fcp']:
+ if 'address' in required_keys:
+ required_keys.remove('address')
+ if 'home_port' in required_keys:
+ required_keys.remove('home_port')
+ if 'netmask' in required_keys:
+ required_keys.remove('netmask')
+ not_required_params = set(['address', 'netmask', 'firewall_policy'])
+ if not not_required_params.isdisjoint(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error: Following parameters for creating interface are not supported'
+ ' for data-protocol fc-nvme: %s' % ', '.join(not_required_params))
+ data_protocols_obj.add_new_child('data-protocol', protocol)
+ return data_protocols_obj
+ return None
+
+ def get_home_node_for_cluster(self):
+ ''' get the first node name from this cluster '''
+ get_node = netapp_utils.zapi.NaElement('cluster-node-get-iter')
+ attributes = {
+ 'query': {
+ 'cluster-node-info': {}
+ }
+ }
+ get_node.translate_struct(attributes)
+ try:
+ result = self.server.invoke_successfully(get_node, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ if str(exc.code) == '13003' or exc.message == 'ZAPI is not enabled in pre-cluster mode.':
+ return None
+ self.module.fail_json(msg='Error fetching node for interface %s: %s' %
+ (self.parameters['interface_name'], to_native(exc)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes = result.get_child_by_name('attributes-list')
+ return attributes.get_child_by_name('cluster-node-info').get_child_content('node-name')
+ return None
+
+ def validate_create_parameters(self, keys):
+ '''
+ Validate if required parameters for create are present.
+ Parameter requirement might vary based on given data-protocol.
+ :return: None
+ '''
+ if self.parameters.get('home_node') is None:
+ node = self.get_home_node_for_cluster()
+ if node is not None:
+ self.parameters['home_node'] = node
+ # validate if mandatory parameters are present for create
+ if not keys.issubset(set(self.parameters.keys())) and self.parameters.get('subnet_name') is None:
+ self.module.fail_json(msg='Error: Missing one or more required parameters for creating interface: %s'
+ % ', '.join(keys))
+ # if role is intercluster, protocol cannot be specified
+ if self.parameters.get('role') == "intercluster" and self.parameters.get('protocols') is not None:
+ self.module.fail_json(msg='Error: Protocol cannot be specified for intercluster role,'
+ ' failed to create interface')
+
+ def create_interface(self):
+ ''' calling zapi to create interface '''
+ required_keys = set(['role', 'home_port'])
+ data_protocols_obj = None
+ if self.parameters.get('subnet_name') is None:
+ if self.parameters.get('is_ipv4_link_local') is not None:
+ if not self.parameters.get('is_ipv4_link_local'):
+ required_keys.add('address')
+ required_keys.add('netmask')
+ if self.parameters.get('service_policy') is not None:
+ required_keys.remove('role')
+ data_protocols_obj = self.set_protocol_option(required_keys)
+ self.validate_create_parameters(required_keys)
+
+ options = {'interface-name': self.parameters['interface_name'],
+ 'vserver': self.parameters['vserver']}
+ NetAppOntapInterface.set_options(options, self.parameters)
+ interface_create = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-create', **options)
+ if data_protocols_obj is not None:
+ interface_create.add_child_elem(data_protocols_obj)
+ try:
+ self.server.invoke_successfully(interface_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ # msg: "Error Creating interface ansible_interface: NetApp API failed. Reason - 17:A LIF with the same name already exists"
+ if to_native(exc.code) == "17":
+ self.na_helper.changed = False
+ else:
+ self.module.fail_json(msg='Error Creating interface %s: %s' %
+ (self.parameters['interface_name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_interface(self, current_status):
+ ''' calling zapi to delete interface '''
+ if current_status == 'up':
+ self.parameters['admin_status'] = 'down'
+ self.modify_interface({'admin_status': 'down'})
+
+ interface_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-interface-delete', **{'interface-name': self.parameters['interface_name'],
+ 'vserver': self.parameters['vserver']})
+ try:
+ self.server.invoke_successfully(interface_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error deleting interface %s: %s' %
+ (self.parameters['interface_name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def modify_interface(self, modify):
+ """
+ Modify the interface.
+ """
+ # current_node and current_port are not valid for net-interface-modify, only for migrate, so remove them from the modify options
+ migrate = {}
+ if modify.get('current_node') is not None:
+ migrate['current_node'] = modify.pop('current_node')
+ if modify.get('current_port') is not None:
+ migrate['current_port'] = modify.pop('current_port')
+ if len(modify) > 0:
+ options = {'interface-name': self.parameters['interface_name'],
+ 'vserver': self.parameters['vserver']
+ }
+ NetAppOntapInterface.set_options(options, modify)
+ interface_modify = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-modify', **options)
+ try:
+ self.server.invoke_successfully(interface_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as err:
+ self.module.fail_json(msg='Error modifying interface %s: %s' %
+ (self.parameters['interface_name'], to_native(err)),
+ exception=traceback.format_exc())
+ # if current node or current port has changed, we need to migrate the interface
+ if len(migrate) > 0:
+ self.migrate_interface()
+
+ def migrate_interface(self):
+ interface_migrate = netapp_utils.zapi.NaElement('net-interface-migrate')
+ if self.parameters.get('current_node') is None:
+ self.module.fail_json(msg='current_node must be set to migrate')
+ interface_migrate.add_new_child('destination-node', self.parameters['current_node'])
+ if self.parameters.get('current_port') is not None:
+ interface_migrate.add_new_child('destination-port', self.parameters['current_port'])
+ interface_migrate.add_new_child('lif', self.parameters['interface_name'])
+ interface_migrate.add_new_child('vserver', self.parameters['vserver'])
+ try:
+ self.server.invoke_successfully(interface_migrate, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error migrating %s: %s'
+ % (self.parameters['current_node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_interface", cserver)
+
+ def apply(self):
+ ''' calling all interface features '''
+
+ # Checking to see if autosupport_log() can be run as this is a post cluster setup request.
+ try:
+ self.autosupport_log()
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 13003 denotes cluster does not exist. It happens when running operations on a node not in cluster.
+ if to_native(error.code) == "13003":
+ pass
+ else:
+ self.module.fail_json(msg='Error calling autosupport_log(): %s' % (to_native(error)),
+ exception=traceback.format_exc())
+ current = self.get_interface()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_interface()
+ elif cd_action == 'delete':
+ self.delete_interface(current['admin_status'])
+ elif modify:
+ self.modify_interface(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ interface = NetAppOntapInterface()
+ interface.apply()
+
+
+if __name__ == '__main__':
+ main()
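
A minimal sketch of how create_interface(), set_protocol_option() and validate_create_parameters() above combine to decide which options are mandatory when creating a LIF; the ZAPI plumbing and error reporting are left out, and the plain dict below stands in for self.parameters:

def required_keys_for_create(params):
    '''Compute the option names that must be supplied to create a LIF.'''
    required = {'role', 'home_port'}
    if params.get('subnet_name') is None and params.get('is_ipv4_link_local') is False:
        # an explicit static address is expected in this case
        required.update(('address', 'netmask'))
    if params.get('service_policy') is not None:
        required.discard('role')                       # role may be omitted with a service policy
    for protocol in params.get('protocols') or []:
        if protocol.lower() in ('fc-nvme', 'fcp'):
            # FC data protocols do not take an IP address, netmask or home port
            required.difference_update(('address', 'netmask', 'home_port'))
    return required

# required_keys_for_create({})                            -> {'role', 'home_port'}
# required_keys_for_create({'is_ipv4_link_local': False}) -> {'role', 'home_port', 'address', 'netmask'}
# required_keys_for_create({'protocols': ['fcp']})        -> {'role'}
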
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py
new file mode 100644
index 00000000..02ceb7c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+"""
+this is the na_ontap_ipspace module
+
+# (c) 2018, NTT Europe Ltd.
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_ipspace
+
+short_description: NetApp ONTAP Manage an ipspace
+
+version_added: 2.9.0
+
+author:
+ - NTTE Storage Engineering (@vicmunoz) <cl.eng.sto@ntt.eu>
+
+description:
+ - Manage an ipspace for an ONTAP cluster
+
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+
+options:
+ state:
+ description:
+ - Whether the specified ipspace should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the ipspace to manage
+ required: true
+ type: str
+ from_name:
+ description:
+ - Name of the existing ipspace to be renamed to name
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create ipspace
+ na_ontap_ipspace:
+ state: present
+ name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete ipspace
+ na_ontap_ipspace:
+ state: absent
+ name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Rename ipspace
+ na_ontap_ipspace:
+ state: present
+ name: ansibleIpspace_newname
+ from_name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIpspace(object):
+ '''Class with ipspace operations'''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def ipspace_get_iter(self, name):
+ """
+ Return net-ipspaces-get-iter query results
+ :param name: Name of the ipspace
+ :return: NaElement if ipspace found, None otherwise
+ """
+ ipspace_get_iter = netapp_utils.zapi.NaElement('net-ipspaces-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-info', **{'ipspace': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ ipspace_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(
+ ipspace_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 14636 denotes an ipspace does not exist
+ # Error 13073 denotes an ipspace not found
+ if to_native(error.code) == "14636" or to_native(error.code) == "13073":
+ return None
+ else:
+ self.module.fail_json(
+ msg=to_native(error),
+ exception=traceback.format_exc())
+ return result
+
+ def get_ipspace(self, name=None):
+ """
+ Fetch details if ipspace exists
+ :param name: Name of the ipspace to be fetched
+ :return:
+ Dictionary of current details if ipspace found
+ None if ipspace is not found
+ """
+ if name is None:
+ name = self.parameters['name']
+ if self.use_rest:
+ api = 'network/ipspaces'
+ params = None
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ return None
+ elif 'records' in message and len(message['records']) == 0:
+ return None
+ elif 'records' not in message:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ for record in message['records']:
+ if record['name'] == name:
+ return record
+ return None
+ else:
+ ipspace_get = self.ipspace_get_iter(name)
+ if (ipspace_get and ipspace_get.get_child_by_name('num-records') and
+ int(ipspace_get.get_child_content('num-records')) >= 1):
+ current_ipspace = dict()
+ attr_list = ipspace_get.get_child_by_name('attributes-list')
+ attr = attr_list.get_child_by_name('net-ipspaces-info')
+ current_ipspace['name'] = attr.get_child_content('ipspace')
+ return current_ipspace
+ return None
+
+ def create_ipspace(self):
+ """
+ Create ipspace
+ :return: None
+ """
+ if self.use_rest:
+ api = 'network/ipspaces'
+ params = {'name': self.parameters['name']}
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ ipspace_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-create', **{'ipspace': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(ipspace_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg="Error provisioning ipspace %s: %s" % (
+ self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_ipspace(self):
+ """
+ Destroy ipspace
+ :return: None
+ """
+ if self.use_rest:
+ current = self.get_ipspace()
+ if current is not None:
+ uuid = current['uuid']
+ api = 'network/ipspaces/' + uuid
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ ipspace_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-destroy',
+ **{'ipspace': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(
+ ipspace_destroy, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg="Error removing ipspace %s: %s" % (
+ self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_ipspace(self):
+ """
+ Rename an ipspace
+ :return: Nothing
+ """
+ if self.use_rest:
+ current = self.get_ipspace(self.parameters['from_name'])
+ if current is None:
+ self.module.fail_json(msg="Error renaming ipspace %s" % (self.parameters['from_name']))
+ uuid = current['uuid']
+ api = 'network/ipspaces/' + uuid
+ params = {'name': self.parameters['name']}
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ ipspace_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-rename',
+ **{'ipspace': self.parameters['from_name'],
+ 'new-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(ipspace_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg="Error renaming ipspace %s: %s" % (
+ self.parameters['from_name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to the ipspace
+ :return: Nothing
+ """
+ current = self.get_ipspace()
+ # rename and create are mutually exclusive
+ rename, cd_action = None, None
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(
+ self.get_ipspace(self.parameters['from_name']),
+ current)
+ if rename is None:
+ self.module.fail_json(
+ msg="Error renaming: ipspace %s does not exist" %
+ self.parameters['from_name'])
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_ipspace()
+ elif cd_action == 'create':
+ self.create_ipspace()
+ elif cd_action == 'delete':
+ self.delete_ipspace()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action
+ :return: nothing
+ """
+ obj = NetAppOntapIpspace()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
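
The REST branch of get_ipspace() above boils down to listing network/ipspaces and matching on name. The sketch below does the same thing with the requests library instead of the collection's OntapRestAPI wrapper; the hostname, credentials and verify flag are placeholders, and it assumes the standard /api base path of the ONTAP REST interface:

import requests

def get_ipspace_rest(hostname, auth, name, verify=False):
    '''Return the ipspace record matching name, or None if it does not exist.'''
    response = requests.get('https://%s/api/network/ipspaces' % hostname,
                            auth=auth, verify=verify)
    response.raise_for_status()
    for record in response.json().get('records', []):
        if record['name'] == name:
            return record          # the record carries the uuid used for delete/rename
    return None

# get_ipspace_rest('cluster.example.com', ('admin', 'secret'), 'ansibleIpspace')
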
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py
new file mode 100644
index 00000000..8975327a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+
+# (c) 2017-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_iscsi
+
+short_description: NetApp ONTAP manage iSCSI service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, start or stop the iSCSI service on an SVM.
+
+options:
+
+ state:
+ description:
+ - Whether the service should be present or deleted.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ service_state:
+ description:
+ - Whether the specified service should be running.
+ choices: ['started', 'stopped']
+ type: str
+
+ vserver:
+ required: true
+ type: str
+ description:
+ - The name of the vserver to use.
+
+'''
+
+EXAMPLES = """
+- name: Create iscsi service
+ na_ontap_iscsi:
+ state: present
+ service_state: started
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Stop Iscsi service
+ na_ontap_iscsi:
+ state: present
+ service_state: stopped
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete Iscsi service
+ na_ontap_iscsi:
+ state: absent
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapISCSI(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, type='str', choices=['started', 'stopped'], default=None),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ params = self.module.params
+
+ # set up state variables
+ self.state = params['state']
+ self.service_state = params['service_state']
+ if self.state == 'present' and self.service_state is None:
+ self.service_state = 'started'
+ self.vserver = params['vserver']
+ self.is_started = None
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.vserver)
+
+ def get_iscsi(self):
+ """
+ Return details about the iscsi service
+
+ :return: Details about the iscsi service
+ :rtype: dict
+ """
+ iscsi_info = netapp_utils.zapi.NaElement('iscsi-service-get-iter')
+ iscsi_attributes = netapp_utils.zapi.NaElement('iscsi-service-info')
+
+ iscsi_attributes.add_new_child('vserver', self.vserver)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(iscsi_attributes)
+
+ iscsi_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(iscsi_info, True)
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ iscsi = result.get_child_by_name(
+ 'attributes-list').get_child_by_name('iscsi-service-info')
+ if iscsi:
+ is_started = iscsi.get_child_content('is-available') == 'true'
+ return_value = {
+ 'is_started': is_started
+ }
+
+ return return_value
+
+ def create_iscsi_service(self):
+ """
+ Create iscsi service and start if requested
+ """
+ iscsi_service = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-create',
+ **{'start': 'true' if self.state == 'started' else 'false'
+ })
+
+ try:
+ self.server.invoke_successfully(
+ iscsi_service, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error creating iscsi service: % s"
+ % (to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_iscsi_service(self):
+ """
+ Delete the iscsi service
+ """
+ if self.is_started:
+ self.stop_iscsi_service()
+
+ iscsi_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-destroy')
+
+ try:
+ self.server.invoke_successfully(
+ iscsi_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting iscsi service \
+ on vserver %s: %s"
+ % (self.vserver, to_native(e)),
+ exception=traceback.format_exc())
+
+ def stop_iscsi_service(self):
+ """
+ Stop iscsi service
+ """
+
+ iscsi_stop = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-stop')
+
+ try:
+ self.server.invoke_successfully(iscsi_stop, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error Stopping iscsi service \
+ on vserver %s: %s"
+ % (self.vserver, to_native(e)),
+ exception=traceback.format_exc())
+
+ def start_iscsi_service(self):
+ """
+ Start iscsi service
+ """
+ iscsi_start = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-start')
+
+ try:
+ self.server.invoke_successfully(iscsi_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error starting iscsi service \
+ on vserver %s: %s"
+ % (self.vserver, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ property_changed = False
+ iscsi_service_exists = False
+ netapp_utils.ems_log_event("na_ontap_iscsi", self.server)
+ iscsi_service_detail = self.get_iscsi()
+
+ if iscsi_service_detail:
+ self.is_started = iscsi_service_detail['is_started']
+ iscsi_service_exists = True
+
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ is_started = 'started' if self.is_started else 'stopped'
+ property_changed = is_started != self.service_state
+
+ else:
+ if self.state == 'present':
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not iscsi_service_exists:
+ self.create_iscsi_service() # the service is stopped when initially created
+ if self.service_state == 'started':
+ self.start_iscsi_service()
+ if iscsi_service_exists and self.service_state == 'stopped':
+ self.stop_iscsi_service()
+
+ elif self.state == 'absent':
+ self.delete_iscsi_service()
+
+ changed = property_changed
+ # TODO: include other details about the lun (size, etc.)
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppOntapISCSI()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
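
The change detection in apply() above reduces to a small decision table over the requested state/service_state and the current service details from get_iscsi(). A minimal sketch, returning the names of the calls the module would make:

def plan_iscsi_actions(current, state, service_state):
    '''current is None or {'is_started': bool}; returns an ordered list of actions.'''
    actions = []
    if current is None:
        if state == 'present':
            actions.append('create')               # the service is created stopped
            if service_state == 'started':
                actions.append('start')
    elif state == 'absent':
        if current['is_started']:
            actions.append('stop')                 # delete_iscsi_service stops it first
        actions.append('delete')
    elif service_state == 'started' and not current['is_started']:
        actions.append('start')
    elif service_state == 'stopped' and current['is_started']:
        actions.append('stop')
    return actions

# plan_iscsi_actions(None, 'present', 'started')                 -> ['create', 'start']
# plan_iscsi_actions({'is_started': True}, 'present', 'stopped') -> ['stop']
# plan_iscsi_actions({'is_started': True}, 'absent', None)       -> ['stop', 'delete']
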
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py
new file mode 100644
index 00000000..b1b16cd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_iscsi_security
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify iscsi security.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_iscsi_security
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified initiator should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ auth_type:
+ description:
+ - Specifies the authentication type.
+ choices: ['chap', 'none', 'deny']
+ type: str
+ initiator:
+ description:
+ - Specifies the name of the initiator.
+ required: true
+ type: str
+ address_ranges:
+ description:
+ - May be a single IPv4 or IPv6 address or a range containing a start address and an end address.
+ - The start and end addresses themselves are included in the range.
+ - If not present, the initiator is allowed to log in from any IP address.
+ type: list
+ elements: str
+ inbound_username:
+ description:
+ - Inbound CHAP username.
+ - Required for CHAP. A null username is not allowed.
+ type: str
+ inbound_password:
+ description:
+ - Inbound CHAP user password.
+ - Cannot be modified. To change the password, delete and re-create the initiator.
+ type: str
+ outbound_username:
+ description:
+ - Outbound CHAP user name.
+ type: str
+ outbound_password:
+ description:
+ - Outbound CHAP user password.
+ - Cannot be modified. To change the password, delete and re-create the initiator.
+ type: str
+short_description: "NetApp ONTAP Manage iscsi security."
+version_added: "19.10.1"
+'''
+
+EXAMPLES = """
+ - name: create
+ na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ inbound_username: user_1
+ inbound_password: password_1
+ outbound_username: user_2
+ outbound_password: password_2
+ auth_type: chap
+ address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78
+
+ - name: modify outbound username
+ na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ inbound_username: user_1
+ inbound_password: password_1
+ outbound_username: user_out_3
+ outbound_password: password_3
+ auth_type: chap
+ address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78
+
+ - name: modify address
+ na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ address_ranges: 10.125.193.90,10.125.10.20-10.125.10.30
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPIscsiSecurity(object):
+ """
+ Class with iscsi security methods
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ auth_type=dict(required=False, type='str', choices=['chap', 'none', 'deny']),
+ inbound_password=dict(required=False, type='str', no_log=True),
+ inbound_username=dict(required=False, type='str'),
+ initiator=dict(required=True, type='str'),
+ address_ranges=dict(required=False, type='list', elements='str'),
+ outbound_password=dict(required=False, type='str', no_log=True),
+ outbound_username=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['auth_type', 'chap', ['inbound_username', 'inbound_password']]
+ ],
+ required_together=[
+ ['inbound_username', 'inbound_password'],
+ ['outbound_username', 'outbound_password'],
+ ],
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.uuid = self.get_svm_uuid()
+
+ def get_initiator(self):
+ """
+ Get current initiator.
+ :return: dict of current initiator details.
+ """
+ params = {'fields': '*', 'initiator': self.parameters['initiator']}
+ api = '/protocols/san/iscsi/credentials/'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching initiator: %s" % error)
+ if message['num_records'] > 0:
+ record = message['records'][0]
+ initiator_details = dict()
+ initiator_details['auth_type'] = record['authentication_type']
+ if initiator_details['auth_type'] == 'chap':
+ if record['chap'].get('inbound'):
+ initiator_details['inbound_username'] = record['chap']['inbound']['user']
+ else:
+ initiator_details['inbound_username'] = None
+ if record['chap'].get('outbound'):
+ initiator_details['outbound_username'] = record['chap']['outbound']['user']
+ else:
+ initiator_details['outbound_username'] = None
+ if record.get('initiator_address'):
+ if record['initiator_address'].get('ranges'):
+ ranges = []
+ for address_range in record['initiator_address']['ranges']:
+ if address_range['start'] == address_range['end']:
+ ranges.append(address_range['start'])
+ else:
+ ranges.append(address_range['start'] + '-' + address_range['end'])
+ initiator_details['address_ranges'] = ranges
+ else:
+ initiator_details['address_ranges'] = None
+ return initiator_details
+
+ def create_initiator(self):
+ """
+ Create initiator.
+ :return: None.
+ """
+ params = dict()
+ params['authentication_type'] = self.parameters['auth_type']
+ params['initiator'] = self.parameters['initiator']
+ if self.parameters['auth_type'] == 'chap':
+ chap_info = dict()
+ chap_info['inbound'] = {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}
+ if self.parameters.get('outbound_username'):
+ chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
+ params['chap'] = chap_info
+ address_info = self.get_address_info(self.parameters.get('address_ranges'))
+ if address_info is not None:
+ params['initiator_address'] = {'ranges': address_info}
+ params['svm'] = {'uuid': self.uuid, 'name': self.parameters['vserver']}
+ api = '/protocols/san/iscsi/credentials'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating initiator: %s" % error)
+
+ def delete_initiator(self):
+ """
+ Delete initiator.
+ :return: None.
+ """
+ api = '/protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting initiator: %s" % error)
+
+ def modify_initiator(self, modify, current):
+ """
+ Modify initiator.
+ :param modify: dict of modify attributes.
+ :return: None.
+ """
+ params = dict()
+ use_chap = False
+ chap_update = False
+ chap_update_inbound = False
+ chap_update_outbound = False
+
+ if modify.get('auth_type') and modify['auth_type'] == 'chap':
+ # change in auth_type
+ chap_update = True
+ use_chap = True
+ elif current.get('auth_type') == 'chap':
+ # we're already using chap
+ use_chap = True
+
+ if use_chap and (modify.get('inbound_username') or modify.get('inbound_password')):
+ # change in chap inbound credentials
+ chap_update = True
+ chap_update_inbound = True
+
+ if use_chap and (modify.get('outbound_username') or modify.get('outbound_password')):
+ # change in chap outbound credentials
+ chap_update = True
+ chap_update_outbound = True
+
+ if chap_update:
+ chap_info = dict()
+ # set values from self.parameters as they may not show as modified
+ if chap_update_inbound:
+ chap_info['inbound'] = {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}
+ else:
+ # use current values as inbound username/password are required
+ chap_info['inbound'] = {'user': current.get('inbound_username'), 'password': current.get('inbound_password')}
+ if chap_update_outbound:
+ chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
+
+ params['chap'] = chap_info
+ address_info = self.get_address_info(modify.get('address_ranges'))
+ if address_info is not None:
+ params['initiator_address'] = {'ranges': address_info}
+ api = '/protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
+ dummy, error = self.rest_api.patch(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying initiator: %s" % error)
+
+ def get_address_info(self, address_ranges):
+ if address_ranges is None:
+ return None
+ else:
+ address_info = []
+ for address in address_ranges:
+ address_range = {}
+ if '-' in address:
+ address_range['end'] = address.split('-')[1]
+ address_range['start'] = address.split('-')[0]
+ else:
+ address_range['end'] = address
+ address_range['start'] = address
+ address_info.append(address_range)
+ return address_info
+
+ def apply(self):
+ """
+ check create/delete/modify operations if needed.
+ :return: None.
+ """
+ current = self.get_initiator()
+ action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if action == 'create':
+ self.create_initiator()
+ elif action == 'delete':
+ self.delete_initiator()
+ elif modify:
+ self.modify_initiator(modify, current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def get_svm_uuid(self):
+ """
+ Get a svm's UUID
+ :return: uuid of the svm.
+ """
+ params = {'fields': 'uuid', 'name': self.parameters['vserver']}
+ api = "svm/svms"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
+ if not message.get('records'):
+ self.module.fail_json(msg="Error on fetching svm uuid, vserver not found: %s" % self.parameters['vserver'])
+ return message['records'][0]['uuid']
+
+
+def main():
+ """Execute action"""
+ iscsi_obj = NetAppONTAPIscsiSecurity()
+ iscsi_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
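
get_address_info() above normalises each address_ranges entry, either a single address or a 'start-end' range, into the {'start': ..., 'end': ...} shape expected by the REST credentials API. A minimal equivalent:

def parse_address_ranges(address_ranges):
    '''Return a list of {'start': ..., 'end': ...} dicts, or None when no ranges are given.'''
    if address_ranges is None:
        return None
    parsed = []
    for address in address_ranges:
        if '-' in address:
            start, end = address.split('-', 1)
        else:
            start = end = address
        parsed.append({'start': start, 'end': end})
    return parsed

# parse_address_ranges(['10.125.10.0-10.125.10.10', '10.125.193.78'])
#   -> [{'start': '10.125.10.0', 'end': '10.125.10.10'},
#       {'start': '10.125.193.78', 'end': '10.125.193.78'}]
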
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py
new file mode 100644
index 00000000..db2c3a1f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_job_schedule
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_job_schedule
+short_description: NetApp ONTAP Job Schedule
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Delete/Modify job-schedules on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified job schedule should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the job-schedule to manage.
+ required: true
+ type: str
+ job_minutes:
+ description:
+ - The minute(s) of each hour when the job should be run.
+ Job Manager cron scheduling minute.
+ -1 represents all minutes and is
+ only supported for cron schedule create and modify.
+ Range is [-1..59]
+ type: list
+ elements: str
+ job_hours:
+ version_added: 2.8.0
+ description:
+ - The hour(s) of the day when the job should be run.
+ Job Manager cron scheduling hour.
+ -1 represents all hours and is
+ only supported for cron schedule create and modify.
+ Range is [-1..23]
+ type: list
+ elements: str
+ job_months:
+ version_added: 2.8.0
+ description:
+ - The month(s) when the job should be run.
+ Job Manager cron scheduling month.
+ -1 represents all months and is
+ only supported for cron schedule create and modify.
+ Range is [-1..11]
+ type: list
+ elements: str
+ job_days_of_month:
+ version_added: 2.8.0
+ description:
+ - The day(s) of the month when the job should be run.
+ Job Manager cron scheduling day of month.
+ -1 represents all days of a month from 1 to 31, and is
+ only supported for cron schedule create and modify.
+ Range is [-1..31]
+ type: list
+ elements: str
+ job_days_of_week:
+ version_added: 2.8.0
+ description:
+ - The day(s) in the week when the job should be run.
+ Job Manager cron scheduling day of week.
+ Zero represents Sunday. -1 represents all days of a week and is
+ only supported for cron schedule create and modify.
+ Range is [-1..6]
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+  - name: Create job for 11:30 PM on the 10th of every month
+ na_ontap_job_schedule:
+ state: present
+ name: jobName
+ job_minutes: 30
+ job_hours: 23
+ job_days_of_month: 10
+ job_months: -1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete Job
+ na_ontap_job_schedule:
+ state: absent
+ name: jobName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
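+  # Illustrative sketch, not part of the original module examples: the option
+  # documentation above states that -1 stands for "all" minutes/hours on create,
+  # and the job name below is a made-up placeholder.
+  - name: Create job that runs every minute of every hour
+    na_ontap_job_schedule:
+      state: present
+      name: everyMinuteJob
+      job_minutes: -1
+      job_hours: -1
+      hostname: "{{ netapp_hostname }}"
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"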
+"""
+
+RETURN = """
+
+"""
+
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPJob(object):
+ '''Class with job schedule cron methods'''
+
+ def __init__(self):
+
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ job_minutes=dict(required=False, type='list', elements='str'),
+ job_months=dict(required=False, type='list', elements='str'),
+ job_hours=dict(required=False, type='list', elements='str'),
+ job_days_of_month=dict(required=False, type='list', elements='str'),
+ job_days_of_week=dict(required=False, type='list', elements='str')
+ ))
+
+ self.uuid = None
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+ self.set_playbook_api_key_map()
+
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def set_playbook_zapi_key_map(self):
+ self.na_helper.zapi_string_keys = {
+ 'name': 'job-schedule-name',
+ }
+ self.na_helper.zapi_list_keys = {
+ 'job_minutes': ('job-schedule-cron-minute', 'cron-minute'),
+ 'job_months': ('job-schedule-cron-month', 'cron-month'),
+ 'job_hours': ('job-schedule-cron-hour', 'cron-hour'),
+ 'job_days_of_month': ('job-schedule-cron-day', 'cron-day-of-month'),
+ 'job_days_of_week': ('job-schedule-cron-day-of-week', 'cron-day-of-week')
+ }
+
+ def set_playbook_api_key_map(self):
+ self.na_helper.api_list_keys = {
+ 'job_minutes': 'minutes',
+ 'job_months': 'months',
+ 'job_hours': 'hours',
+ 'job_days_of_month': 'days',
+ 'job_days_of_week': 'weekdays'
+ }
+
+ def get_job_schedule(self):
+ """
+ Return details about the job
+ :param:
+ name : Job name
+ :return: Details about the Job. None if not found.
+ :rtype: dict
+ """
+ if self.use_rest:
+ params = {'name': self.parameters['name']}
+ api = '/cluster/schedules'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching job schedule: %s" % error)
+ if message['num_records'] > 0:
+ self.uuid = message['records'][0]['uuid']
+ job_details = dict()
+ job_details['name'] = message['records'][0]['name']
+ for key, value in self.na_helper.api_list_keys.items():
+ if value in message['records'][0]['cron']:
+ job_details[key] = message['records'][0]['cron'][value]
+ # convert list of int to list of string
+ for key, value in job_details.items():
+ if isinstance(value, list):
+ job_details[key] = [str(x) for x in value]
+ return job_details
+
+ else:
+ job_get_iter = netapp_utils.zapi.NaElement('job-schedule-cron-get-iter')
+ job_get_iter.translate_struct({
+ 'query': {
+ 'job-schedule-cron-info': {
+ 'job-schedule-name': self.parameters['name']
+ }
+ }
+ })
+ result = self.server.invoke_successfully(job_get_iter, True)
+ job_details = None
+ # check if job exists
+ if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
+ job_info = result['attributes-list']['job-schedule-cron-info']
+ job_details = dict()
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ job_details[item_key] = job_info[zapi_key]
+ for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
+ parent, dummy = zapi_key
+ job_details[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
+ zapi_parent=job_info.get_child_by_name(parent)
+ )
+ # if any of the job_hours, job_minutes, job_months, job_days are empty:
+ # it means the value is -1 for ZAPI
+ if not job_details[item_key]:
+ job_details[item_key] = ['-1']
+ return job_details
+
+ def add_job_details(self, na_element_object, values):
+ """
+ Add children node for create or modify NaElement object
+ :param na_element_object: modify or create NaElement object
+ :param values: dictionary of cron values to be added
+ :return: None
+ """
+ for item_key in values:
+ if item_key in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(item_key)
+ na_element_object[zapi_key] = values[item_key]
+ elif item_key in self.na_helper.zapi_list_keys:
+ parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
+ na_element_object.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+ zapi_parent=parent_key,
+ zapi_child=child_key,
+ data=values.get(item_key)))
+
+ def create_job_schedule(self):
+ """
+ Creates a job schedule
+ """
+ # job_minutes is mandatory for create
+ if self.parameters.get('job_minutes') is None:
+ self.module.fail_json(msg='Error: missing required parameter job_minutes for create')
+
+ if self.use_rest:
+ cron = dict()
+ for key, value in self.na_helper.api_list_keys.items():
+ # -1 means all in zapi, while empty means all in api.
+ if self.parameters.get(key):
+ if len(self.parameters[key]) == 1 and int(self.parameters[key][0]) == -1:
+ # need to set empty value for minutes as this is required parameter
+ if value == 'minutes':
+ cron[value] = []
+ else:
+ cron[value] = self.parameters[key]
+
+ params = {
+ 'name': self.parameters['name'],
+ 'cron': cron
+ }
+ api = '/cluster/schedules'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating job schedule: %s" % error)
+
+ else:
+ job_schedule_create = netapp_utils.zapi.NaElement('job-schedule-cron-create')
+ self.add_job_details(job_schedule_create, self.parameters)
+ try:
+ self.server.invoke_successfully(job_schedule_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating job schedule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_job_schedule(self):
+ """
+ Delete a job schedule
+ """
+ if self.use_rest:
+ api = '/cluster/schedules/' + self.uuid
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting job schedule: %s" % error)
+ else:
+ job_schedule_delete = netapp_utils.zapi.NaElement('job-schedule-cron-destroy')
+ self.add_job_details(job_schedule_delete, self.parameters)
+ try:
+ self.server.invoke_successfully(job_schedule_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting job schedule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_job_schedule(self, params, current):
+ """
+ modify a job schedule
+ """
+ if self.use_rest:
+ cron = dict()
+ for key, value in self.na_helper.api_list_keys.items():
+ # -1 means all in zapi, while empty means all in api.
+ if params.get(key):
+ if len(self.parameters[key]) == 1 and int(self.parameters[key][0]) == -1:
+ pass
+ else:
+ cron[value] = self.parameters[key]
+                # Usually we would only send the modified attributes, but omitting an
+                # attribute means "all" in the REST API, so carry over the current values.
+ elif current.get(key):
+ cron[value] = current[key]
+ params = {
+ 'cron': cron
+ }
+ api = '/cluster/schedules/' + self.uuid
+ dummy, error = self.rest_api.patch(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying job schedule: %s" % error)
+ else:
+ job_schedule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'job-schedule-cron-modify', **{'job-schedule-name': self.parameters['name']})
+ self.add_job_details(job_schedule_modify, params)
+ try:
+ self.server.invoke_successfully(job_schedule_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying job schedule %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ """
+ Autosupport log for job_schedule
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_job_schedule", cserver)
+
+ def apply(self):
+ """
+ Apply action to job-schedule
+ """
+ if not self.use_rest:
+ self.autosupport_log()
+ current = self.get_job_schedule()
+ action = self.na_helper.get_cd_action(current, self.parameters)
+ if action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if action == 'create':
+ self.create_job_schedule()
+ elif action == 'delete':
+ self.delete_job_schedule()
+ elif modify:
+ self.modify_job_schedule(modify, current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Execute action'''
+ job_obj = NetAppONTAPJob()
+ job_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py
new file mode 100644
index 00000000..6517f4b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+'''
+(c) 2019, Red Hat, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_ontap_kerberos_realm
+
+short_description: NetApp ONTAP vserver NFS Kerberos realm
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>,<mzink@redhat.com>
+
+description:
+- Create, modify or delete vserver Kerberos realm configuration
+
+options:
+
+ state:
+ description:
+ - Whether the Kerberos realm is present or absent.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm with kerberos realm configured
+ required: true
+ type: str
+
+ realm:
+ description:
+ - Kerberos realm name
+ required: true
+ type: str
+
+ kdc_vendor:
+ description:
+ - The vendor of the Key Distribution Centre (KDC) server
+ - Required if I(state=present)
+ choices: ['other', 'microsoft']
+ type: str
+
+ kdc_ip:
+ description:
+ - IP address of the Key Distribution Centre (KDC) server
+ - Required if I(state=present)
+ type: str
+
+ kdc_port:
+ description:
+ - TCP port on the KDC to be used for Kerberos communication.
+ - The default for this parameter is '88'.
+ type: str
+
+ clock_skew:
+ description:
+ - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock.
+ - The default for this parameter is '5' minutes.
+ type: str
+
+ comment:
+ description:
+ - Optional comment
+ type: str
+
+ admin_server_ip:
+ description:
+ - IP address of the host where the Kerberos administration daemon is running. This is usually the master KDC.
+ - If this parameter is omitted, the address specified in kdc_ip is used.
+ type: str
+
+ admin_server_port:
+ description:
+ - The TCP port on the Kerberos administration server where the Kerberos administration service is running.
+    - The default for this parameter is '749'.
+ type: str
+
+ pw_server_ip:
+ description:
+ - IP address of the host where the Kerberos password-changing server is running.
+    - Typically, this is the same as the host indicated in admin_server_ip.
+    - If this parameter is omitted, the IP address in kdc_ip is used.
+ type: str
+
+ pw_server_port:
+ description:
+ - The TCP port on the Kerberos password-changing server where the Kerberos password-changing service is running.
+ - The default for this parameter is '464'.
+ type: str
+
+ ad_server_ip:
+ description:
+ - IP Address of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'.
+ type: str
+ version_added: '20.4.0'
+
+ ad_server_name:
+ description:
+ - Host name of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'.
+ type: str
+ version_added: '20.4.0'
+'''
+
+EXAMPLES = '''
+
+ - name: Create kerberos realm other kdc vendor
+ na_ontap_kerberos_realm:
+ state: present
+ realm: 'EXAMPLE.COM'
+ vserver: 'vserver1'
+ kdc_ip: '1.2.3.4'
+ kdc_vendor: 'other'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create kerberos realm Microsoft kdc vendor
+ na_ontap_kerberos_realm:
+ state: present
+ realm: 'EXAMPLE.COM'
+ vserver: 'vserver1'
+ kdc_ip: '1.2.3.4'
+ kdc_vendor: 'microsoft'
+ ad_server_ip: '0.0.0.0'
+ ad_server_name: 'server'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
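+
+  # Illustrative sketch, not part of the original module examples: a realm is
+  # removed with state=absent and the same realm/vserver identifiers as above.
+  - name: Delete kerberos realm
+    na_ontap_kerberos_realm:
+      state: absent
+      realm: 'EXAMPLE.COM'
+      vserver: 'vserver1'
+      hostname: "{{ netapp_hostname }}"
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"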
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapKerberosRealm(object):
+ '''
+ Kerberos Realm definition class
+ '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ admin_server_ip=dict(required=False, type='str'),
+ admin_server_port=dict(required=False, type='str'),
+ clock_skew=dict(required=False, type='str'),
+ comment=dict(required=False, type='str'),
+ kdc_ip=dict(required=False, type='str'),
+ kdc_port=dict(required=False, type='str'),
+ kdc_vendor=dict(required=False, type='str',
+ choices=['microsoft', 'other']),
+ pw_server_ip=dict(required=False, type='str'),
+ pw_server_port=dict(required=False, type='str'),
+ realm=dict(required=True, type='str'),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ ad_server_ip=dict(required=False, type='str'),
+ ad_server_name=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['kdc_vendor', 'kdc_ip']), ('kdc_vendor', 'microsoft', ['ad_server_ip', 'ad_server_name'])],
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ self.simple_attributes = [
+ 'admin_server_ip',
+ 'admin_server_port',
+ 'clock_skew',
+ 'kdc_ip',
+ 'kdc_port',
+ 'kdc_vendor',
+ ]
+
+ def get_krbrealm(self, realm_name=None, vserver_name=None):
+ '''
+ Checks if Kerberos Realm config exists.
+
+ :return:
+ kerberos realm object if found
+ None if not found
+ :rtype: object/None
+ '''
+ # Make query
+ krbrealm_info = netapp_utils.zapi.NaElement('kerberos-realm-get-iter')
+
+ if realm_name is None:
+ realm_name = self.parameters['realm']
+
+ if vserver_name is None:
+ vserver_name = self.parameters['vserver']
+
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm', **{'realm': realm_name, 'vserver-name': vserver_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ krbrealm_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(krbrealm_info, enable_tunneling=True)
+
+ # Get Kerberos Realm details
+ krbrealm_details = None
+ if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
+ attributes_list = result.get_child_by_name('attributes-list')
+ config_info = attributes_list.get_child_by_name('kerberos-realm')
+
+ krbrealm_details = {
+ 'admin_server_ip': config_info.get_child_content('admin-server-ip'),
+ 'admin_server_port': config_info.get_child_content('admin-server-port'),
+ 'clock_skew': config_info.get_child_content('clock-skew'),
+ 'kdc_ip': config_info.get_child_content('kdc-ip'),
+ 'kdc_port': config_info.get_child_content('kdc-port'),
+ 'kdc_vendor': config_info.get_child_content('kdc-vendor'),
+ 'pw_server_ip': config_info.get_child_content('password-server-ip'),
+ 'pw_server_port': config_info.get_child_content('password-server-port'),
+ 'realm': config_info.get_child_content('realm'),
+ 'vserver': config_info.get_child_content('vserver-name'),
+ 'ad_server_ip': config_info.get_child_content('ad-server-ip'),
+ 'ad_server_name': config_info.get_child_content('ad-server-name')
+ }
+
+ return krbrealm_details
+
+ def create_krbrealm(self):
+ '''supported
+ Create Kerberos Realm configuration
+ '''
+ options = {
+ 'realm': self.parameters['realm']
+ }
+
+ # Other options/attributes
+ for attribute in self.simple_attributes:
+ if self.parameters.get(attribute) is not None:
+ options[str(attribute).replace('_', '-')] = self.parameters[attribute]
+
+ if self.parameters.get('pw_server_ip') is not None:
+ options['password-server-ip'] = self.parameters['pw_server_ip']
+ if self.parameters.get('pw_server_port') is not None:
+ options['password-server-port'] = self.parameters['pw_server_port']
+
+ if self.parameters.get('ad_server_ip') is not None:
+ options['ad-server-ip'] = self.parameters['ad_server_ip']
+ if self.parameters.get('ad_server_name') is not None:
+ options['ad-server-name'] = self.parameters['ad_server_name']
+
+ # Initialize NaElement
+ krbrealm_create = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-create', **options)
+
+ # Try to create Kerberos Realm configuration
+ try:
+ self.server.invoke_successfully(krbrealm_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error creating Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def delete_krbrealm(self):
+ '''
+ Delete Kerberos Realm configuration
+ '''
+ krbrealm_delete = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-delete', **{'realm': self.parameters['realm']})
+
+ try:
+ self.server.invoke_successfully(krbrealm_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error deleting Kerberos Realm configuration %s: %s' % (
+ self.parameters['realm'], to_native(errcatch)), exception=traceback.format_exc())
+
+ def modify_krbrealm(self, modify):
+ '''
+ Modify Kerberos Realm
+ :param modify: list of modify attributes
+ '''
+ krbrealm_modify = netapp_utils.zapi.NaElement('kerberos-realm-modify')
+ krbrealm_modify.add_new_child('realm', self.parameters['realm'])
+
+ for attribute in modify:
+ if attribute in self.simple_attributes:
+ krbrealm_modify.add_new_child(str(attribute).replace('_', '-'), self.parameters[attribute])
+ if attribute == 'pw_server_ip':
+ krbrealm_modify.add_new_child('password-server-ip', self.parameters['pw_server_ip'])
+ if attribute == 'pw_server_port':
+ krbrealm_modify.add_new_child('password-server-port', self.parameters['pw_server_port'])
+ if attribute == 'ad_server_ip':
+ krbrealm_modify.add_new_child('ad-server-ip', self.parameters['ad_server_ip'])
+ if attribute == 'ad_server_name':
+ krbrealm_modify.add_new_child('ad-server-name', self.parameters['ad_server_name'])
+
+ # Try to modify Kerberos Realm
+ try:
+ self.server.invoke_successfully(krbrealm_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error modifying Kerberos Realm %s: %s' % (self.parameters['realm'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ current = self.get_krbrealm()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ # create an ems log event for users with auto support turned on
+ netapp_utils.ems_log_event("na_ontap_kerberos_realm", self.server)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_krbrealm()
+ elif cd_action == 'delete':
+ self.delete_krbrealm()
+ elif modify:
+ self.modify_krbrealm(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+#
+# MAIN
+#
+def main():
+ '''ONTAP Kerberos Realm'''
+ krbrealm = NetAppOntapKerberosRealm()
+ krbrealm.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py
new file mode 100644
index 00000000..be34cac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+'''
+(c) 2018-2019, NetApp, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_ontap_ldap
+
+short_description: NetApp ONTAP LDAP
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
+
+description:
+- Create, modify or delete LDAP on NetApp ONTAP SVM/vserver
+
+options:
+
+ state:
+ description:
+    - Whether the LDAP configuration is present or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm configured to use LDAP
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of LDAP client configuration
+ required: true
+ type: str
+
+ skip_config_validation:
+ description:
+ - Skip LDAP validation
+ choices: ['true', 'false']
+ type: str
+'''
+
+EXAMPLES = '''
+
+ - name: Enable LDAP on SVM
+ na_ontap_ldap:
+ state: present
+ name: 'example_ldap'
+ vserver: 'vserver1'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
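+
+  # Illustrative sketch, not part of the original module examples: the LDAP
+  # configuration is removed with state=absent and the documented name/vserver options.
+  - name: Disable LDAP on SVM
+    na_ontap_ldap:
+      state: absent
+      name: 'example_ldap'
+      vserver: 'vserver1'
+      hostname: "{{ netapp_hostname }}"
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"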
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLDAP(object):
+ '''
+ LDAP Client definition class
+ '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ skip_config_validation=dict(required=False, default=None, choices=['true', 'false']),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_ldap(self, client_config_name=None):
+ '''
+ Checks if LDAP config exists.
+
+ :return:
+ ldap config object if found
+ None if not found
+ :rtype: object/None
+ '''
+ # Make query
+ config_info = netapp_utils.zapi.NaElement('ldap-config-get-iter')
+
+ if client_config_name is None:
+ client_config_name = self.parameters['name']
+
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config', **{'client-config': client_config_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ config_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(config_info, enable_tunneling=True)
+
+ # Get LDAP configuration details
+ config_details = None
+ if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
+ attributes_list = result.get_child_by_name('attributes-list')
+ config_info = attributes_list.get_child_by_name('ldap-config')
+
+ # Define config details structure
+ config_details = {'client_config': config_info.get_child_content('client-config'),
+ 'skip_config_validation': config_info.get_child_content('skip-config-validation'),
+ 'vserver': config_info.get_child_content('vserver')}
+
+ return config_details
+
+ def create_ldap(self):
+ '''
+ Create LDAP configuration
+ '''
+ options = {
+ 'client-config': self.parameters['name'],
+ 'client-enabled': 'true'
+ }
+
+ if self.parameters.get('skip_config_validation') is not None:
+ options['skip-config-validation'] = self.parameters['skip_config_validation']
+
+ # Initialize NaElement
+ ldap_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-create', **options)
+
+ # Try to create LDAP configuration
+ try:
+ self.server.invoke_successfully(ldap_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error creating LDAP configuration %s: %s' % (self.parameters['name'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def delete_ldap(self):
+ '''
+ Delete LDAP configuration
+ '''
+ ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-delete', **{})
+
+ try:
+ self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error deleting LDAP configuration %s: %s' % (
+ self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())
+
+ def modify_ldap(self, modify):
+ '''
+ Modify LDAP
+ :param modify: list of modify attributes
+ '''
+ ldap_modify = netapp_utils.zapi.NaElement('ldap-config-modify')
+ ldap_modify.add_new_child('client-config', self.parameters['name'])
+
+ for attribute in modify:
+ if attribute == 'skip_config_validation':
+ ldap_modify.add_new_child('skip-config-validation', self.parameters[attribute])
+
+ # Try to modify LDAP
+ try:
+ self.server.invoke_successfully(ldap_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error modifying LDAP %s: %s' % (self.parameters['name'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ current = self.get_ldap()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ # create an ems log event for users with auto support turned on
+ netapp_utils.ems_log_event("na_ontap_ldap", self.server)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_ldap()
+ elif cd_action == 'delete':
+ self.delete_ldap()
+ elif modify:
+ self.modify_ldap(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+#
+# MAIN
+#
+def main():
+ '''ONTAP LDAP client configuration'''
+ ldapclient = NetAppOntapLDAP()
+ ldapclient.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py
new file mode 100644
index 00000000..c0133863
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python
+'''
+(c) 2018-2019, NetApp, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_ontap_ldap_client
+
+short_description: NetApp ONTAP LDAP client
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
+
+description:
+- Create, modify or delete LDAP client on NetApp ONTAP
+
+options:
+
+ state:
+ description:
+    - Whether the specified LDAP client configuration exists or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm that holds LDAP client configuration
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of LDAP client configuration
+ required: true
+ type: str
+
+ ldap_servers:
+ description:
+    - Comma-separated list of LDAP servers (FQDNs or IP addresses).
+ - Required if I(state=present).
+ type: list
+ elements: str
+
+ schema:
+ description:
+ - LDAP schema
+ - Required if I(state=present).
+ choices: ['AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307']
+ type: str
+
+ ad_domain:
+ description:
+ - Active Directory Domain Name
+ type: str
+
+ base_dn:
+ description:
+ - LDAP base DN
+ type: str
+
+ base_scope:
+ description:
+ - LDAP search scope
+ choices: ['subtree', 'onelevel', 'base']
+ type: str
+
+ bind_as_cifs_server:
+ description:
+ - The cluster uses the CIFS server's credentials to bind to the LDAP server.
+ type: bool
+
+ preferred_ad_servers:
+ description:
+ - Preferred Active Directory (AD) Domain Controllers
+ type: list
+ elements: str
+
+ port:
+ description:
+ - LDAP server port
+ type: int
+
+ query_timeout:
+ description:
+ - LDAP server query timeout
+ type: int
+
+ min_bind_level:
+ description:
+ - Minimal LDAP server bind level.
+ choices: ['anonymous', 'simple', 'sasl']
+ type: str
+
+ bind_dn:
+ description:
+ - LDAP bind user DN
+ type: str
+
+ bind_password:
+ description:
+ - LDAP bind user password
+ type: str
+
+ use_start_tls:
+ description:
+ - Start TLS on LDAP connection
+ type: bool
+
+ referral_enabled:
+ description:
+ - LDAP Referral Chasing
+ type: bool
+
+ session_security:
+ description:
+ - Client Session Security
+ choices: ['none', 'sign', 'seal']
+ type: str
+'''
+
+EXAMPLES = '''
+
+ - name: Create LDAP client
+ na_ontap_ldap_client:
+ state: present
+ name: 'example_ldap'
+ vserver: 'vserver1'
+ ldap_servers: 'ldap1.example.company.com,ldap2.example.company.com'
+ base_dn: 'dc=example,dc=company,dc=com'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
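+
+  # Illustrative sketch, not part of the original module examples: an Active
+  # Directory based client using ad_domain and preferred_ad_servers, which the
+  # module treats as mutually exclusive with ldap_servers. Domain and server
+  # addresses below are made-up placeholders.
+  - name: Create LDAP client using an AD domain
+    na_ontap_ldap_client:
+      state: present
+      name: 'example_ldap_ad'
+      vserver: 'vserver1'
+      ad_domain: 'example.company.com'
+      preferred_ad_servers: '10.0.0.1,10.0.0.2'
+      schema: 'MS-AD-BIS'
+      hostname: "{{ netapp_hostname }}"
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"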
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLDAPClient(object):
+ '''
+ LDAP Client definition class
+ '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ ad_domain=dict(required=False, default=None, type='str'),
+ base_dn=dict(required=False, type='str'),
+ base_scope=dict(required=False, default=None, choices=['subtree', 'onelevel', 'base']),
+ bind_as_cifs_server=dict(required=False, type='bool'),
+ bind_dn=dict(required=False, default=None, type='str'),
+ bind_password=dict(type='str', required=False, default=None, no_log=True),
+ name=dict(required=True, type='str'),
+ ldap_servers=dict(required=False, type='list', elements='str'),
+ min_bind_level=dict(required=False, default=None, choices=['anonymous', 'simple', 'sasl']),
+ preferred_ad_servers=dict(required=False, type='list', elements='str'),
+ port=dict(required=False, default=None, type='int'),
+ query_timeout=dict(required=False, default=None, type='int'),
+ referral_enabled=dict(required=False, type='bool'),
+ schema=dict(required=False, default=None, choices=['AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307']),
+ session_security=dict(required=False, default=None, choices=['none', 'sign', 'seal']),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ use_start_tls=dict(required=False, type='bool'),
+ vserver=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['schema']),
+ ],
+ mutually_exclusive=[
+ ['ldap_servers', 'ad_domain'],
+ ['ldap_servers', 'preferred_ad_servers']
+ ],
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ self.simple_attributes = [
+ 'ad_domain',
+ 'base_dn',
+ 'base_scope',
+ 'bind_as_cifs_server',
+ 'bind_dn',
+ 'bind_password',
+ 'min_bind_level',
+ 'port',
+ 'query_timeout',
+ 'referral_enabled',
+ 'session_security',
+ 'use_start_tls'
+ ]
+
+ def get_ldap_client(self, client_config_name=None, vserver_name=None):
+ '''
+ Checks if LDAP client config exists.
+
+ :return:
+ ldap client config object if found
+ None if not found
+ :rtype: object/None
+ '''
+ # Make query
+ client_config_info = netapp_utils.zapi.NaElement('ldap-client-get-iter')
+
+ if client_config_name is None:
+ client_config_name = self.parameters['name']
+
+ if vserver_name is None:
+ vserver_name = '*'
+
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client',
+ **{
+ 'ldap-client-config': client_config_name,
+ 'vserver': vserver_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ client_config_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(client_config_info, enable_tunneling=False)
+
+ # Get LDAP client configuration details
+ client_config_details = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ client_config_info = attributes_list.get_child_by_name('ldap-client')
+ # Get LDAP servers list
+ ldap_server_list = list()
+ get_list = client_config_info.get_child_by_name('ldap-servers')
+ if get_list is not None:
+ ldap_server_list = [x.get_content() for x in get_list.get_children()]
+
+ preferred_ad_servers_list = list()
+ get_pref_ad_server_list = client_config_info.get_child_by_name('preferred-ad-servers')
+ if get_pref_ad_server_list is not None:
+ preferred_ad_servers_list = [x.get_content() for x in get_pref_ad_server_list.get_children()]
+
+ # Define config details structure
+ client_config_details = {
+ 'name': client_config_info.get_child_content('ldap-client-config'),
+ 'ldap_servers': ldap_server_list,
+ 'ad_domain': client_config_info.get_child_content('ad-domain'),
+ 'base_dn': client_config_info.get_child_content('base-dn'),
+ 'base_scope': client_config_info.get_child_content('base-scope'),
+ 'bind_as_cifs_server': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('bind-as-cifs-server')),
+ 'bind_dn': client_config_info.get_child_content('bind-dn'),
+ 'bind_password': client_config_info.get_child_content('bind-password'),
+ 'min_bind_level': client_config_info.get_child_content('min-bind-level'),
+ 'port': self.na_helper.get_value_for_int(from_zapi=True, value=client_config_info.get_child_content('port')),
+ 'preferred_ad_servers': preferred_ad_servers_list,
+ 'query_timeout': self.na_helper.get_value_for_int(from_zapi=True,
+ value=client_config_info.get_child_content('query-timeout')),
+ 'referral_enabled': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('referral-enabled')),
+ 'schema': client_config_info.get_child_content('schema'),
+ 'session_security': client_config_info.get_child_content('session-security'),
+ 'use_start_tls': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('use-start-tls'))
+ }
+ return client_config_details
+
+ def create_ldap_client(self):
+ '''
+ Create LDAP client configuration
+ '''
+
+ options = {
+ 'ldap-client-config': self.parameters['name'],
+ 'schema': self.parameters['schema'],
+ }
+
+ # Other options/attributes
+ for attribute in self.simple_attributes:
+ if self.parameters.get(attribute) is not None:
+ options[str(attribute).replace('_', '-')] = str(self.parameters[attribute])
+
+ # Initialize NaElement
+ ldap_client_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client-create', **options)
+
+ # LDAP servers NaElement
+ if self.parameters.get('ldap_servers') is not None:
+ ldap_servers_element = netapp_utils.zapi.NaElement('ldap-servers')
+ for ldap_server_name in self.parameters['ldap_servers']:
+ ldap_servers_element.add_new_child('string', ldap_server_name)
+ ldap_client_create.add_child_elem(ldap_servers_element)
+
+ # preferred_ad_servers
+ if self.parameters.get('preferred_ad_servers') is not None:
+ preferred_ad_servers_element = netapp_utils.zapi.NaElement('preferred-ad-servers')
+ for pref_ad_server in self.parameters['preferred_ad_servers']:
+ preferred_ad_servers_element.add_new_child('ip-address', pref_ad_server)
+ ldap_client_create.add_child_elem(preferred_ad_servers_element)
+
+ # Try to create LDAP configuration
+ try:
+ self.server.invoke_successfully(ldap_client_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(
+ msg='Error creating LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def delete_ldap_client(self):
+ '''
+ Delete LDAP client configuration
+ '''
+ ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'ldap-client-delete', **{'ldap-client-config': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error deleting LDAP client configuration %s: %s' % (
+ self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())
+
+ def modify_ldap_client(self, modify):
+ '''
+ Modify LDAP client
+ :param modify: list of modify attributes
+ '''
+ ldap_client_modify = netapp_utils.zapi.NaElement('ldap-client-modify')
+ ldap_client_modify.add_new_child('ldap-client-config', self.parameters['name'])
+
+ for attribute in modify:
+ # LDAP_servers
+ if attribute == 'ldap_servers':
+ ldap_servers_element = netapp_utils.zapi.NaElement('ldap-servers')
+ for ldap_server_name in self.parameters['ldap_servers']:
+ ldap_servers_element.add_new_child('string', ldap_server_name)
+ ldap_client_modify.add_child_elem(ldap_servers_element)
+ # preferred_ad_servers
+ if attribute == 'preferred_ad_servers':
+ preferred_ad_servers_element = netapp_utils.zapi.NaElement('preferred-ad-servers')
+ ldap_client_modify.add_child_elem(preferred_ad_servers_element)
+ for pref_ad_server in self.parameters['preferred_ad_servers']:
+ preferred_ad_servers_element.add_new_child('ip-address', pref_ad_server)
+ # Simple attributes
+ if attribute in self.simple_attributes:
+ ldap_client_modify.add_new_child(str(attribute).replace('_', '-'), str(self.parameters[attribute]))
+
+ # Try to modify LDAP client
+ try:
+ self.server.invoke_successfully(ldap_client_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(
+ msg='Error modifying LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ current = self.get_ldap_client()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ # state is present, either ldap_servers or ad_domain is required
+ if self.parameters['state'] == 'present' and not self.parameters.get('ldap_servers') \
+ and self.parameters.get('ad_domain') is None:
+ self.module.fail_json(msg='Required one of ldap_servers or ad_domain')
+
+ if self.parameters['state'] == 'present' and cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ # create an ems log event for users with auto support turned on
+ netapp_utils.ems_log_event("na_ontap_ldap_client", self.server)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_ldap_client()
+ elif cd_action == 'delete':
+ self.delete_ldap_client()
+ elif modify:
+ self.modify_ldap_client(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+#
+# MAIN
+#
+def main():
+ '''ONTAP LDAP client configuration'''
+ ldapclient = NetAppOntapLDAPClient()
+ ldapclient.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py
new file mode 100644
index 00000000..9adee9bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py
@@ -0,0 +1,333 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_license
+
+short_description: NetApp ONTAP protocol and feature licenses
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Add or remove licenses on NetApp ONTAP.
+
+options:
+ state:
+ description:
+ - Whether the specified license should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ remove_unused:
+ description:
+ - Remove licenses that have no controller affiliation in the cluster.
+ type: bool
+
+ remove_expired:
+ description:
+ - Remove licenses that have expired in the cluster.
+ type: bool
+
+ serial_number:
+ description:
+    - Serial number of the node associated with the license.
+    - This parameter is used primarily when removing a license for a specific service.
+ type: str
+
+ license_names:
+ type: list
+ elements: str
+ description:
+    - List of license names to delete.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+ license_codes:
+ description:
+ - List of license codes to be added.
+ type: list
+ elements: str
+
+'''
+
+
+EXAMPLES = """
+- name: Add licenses
+ na_ontap_license:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ serial_number: #################
+ license_codes: CODE1,CODE2
+
+- name: Remove licenses
+ na_ontap_license:
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ license_names: nfs,cifs
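+
+# Illustrative sketch, not part of the original module examples: remove_unused and
+# remove_expired are documented booleans and can be applied without naming licenses.
+- name: Clean up unused and expired licenses
+  na_ontap_license:
+    state: present
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"
+    remove_unused: true
+    remove_expired: true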
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+def local_cmp(a, b):
+ """
+ compares with only values and not keys, keys should be the same for both dicts
+ :param a: dict 1
+ :param b: dict 2
+ :return: difference of values in both dicts
+ """
+ diff = [key for key in a if a[key] != b[key]]
+ return len(diff)
+
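+# Illustrative example of local_cmp: with a = {'nfs': 'site', 'cifs': 'none'} and
+# b = {'nfs': 'site', 'cifs': 'site'}, local_cmp(a, b) returns 1 (only 'cifs' differs).
+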
+
+class NetAppOntapLicense(object):
+ '''ONTAP license class'''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ serial_number=dict(required=False, type='str'),
+ remove_unused=dict(default=None, type='bool'),
+ remove_expired=dict(default=None, type='bool'),
+ license_codes=dict(default=None, type='list', elements='str'),
+ license_names=dict(default=None, type='list', elements='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False,
+ required_if=[
+ ('state', 'absent', ['serial_number', 'license_names'])]
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.state = parameters['state']
+ self.serial_number = parameters['serial_number']
+ self.remove_unused = parameters['remove_unused']
+ self.remove_expired = parameters['remove_expired']
+ self.license_codes = parameters['license_codes']
+ self.license_names = parameters['license_names']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_licensing_status(self):
+ """
+ Check licensing status
+
+ :return: package (key) and licensing status (value)
+ :rtype: dict
+ """
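+        # Illustrative shape of the returned mapping (actual packages and values
+        # depend on the cluster): {'nfs': <method>, 'cifs': <method>, ...}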
+ license_status = netapp_utils.zapi.NaElement(
+ 'license-v2-status-list-info')
+ result = None
+ try:
+ result = self.server.invoke_successfully(license_status,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error checking license status: %s" %
+ to_native(error), exception=traceback.format_exc())
+
+ return_dictionary = {}
+ license_v2_status = result.get_child_by_name('license-v2-status')
+ if license_v2_status:
+ for license_v2_status_info in license_v2_status.get_children():
+ package = license_v2_status_info.get_child_content('package')
+ status = license_v2_status_info.get_child_content('method')
+ return_dictionary[package] = status
+
+ return return_dictionary
+
+ def remove_licenses(self, package_name):
+ """
+ Remove requested licenses
+ :param:
+ package_name: Name of the license to be deleted
+ """
+ license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
+ license_delete.add_new_child('serial-number', self.serial_number)
+ license_delete.add_new_child('package', package_name)
+ try:
+ self.server.invoke_successfully(license_delete,
+ enable_tunneling=False)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 15661 - Object not found
+ if to_native(error.code) == "15661":
+ return False
+ else:
+ self.module.fail_json(msg="Error removing license %s" %
+ to_native(error), exception=traceback.format_exc())
+
+ def remove_unused_licenses(self):
+ """
+ Remove unused licenses
+ """
+ remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
+ try:
+ self.server.invoke_successfully(remove_unused,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error removing unused licenses: %s" %
+ to_native(error), exception=traceback.format_exc())
+
+ def remove_expired_licenses(self):
+ """
+ Remove expired licenses
+ """
+ remove_expired = netapp_utils.zapi.NaElement(
+ 'license-v2-delete-expired')
+ try:
+ self.server.invoke_successfully(remove_expired,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error removing expired licenses: %s" %
+ to_native(error), exception=traceback.format_exc())
+
+ def add_licenses(self):
+ """
+ Add licenses
+ """
+ license_add = netapp_utils.zapi.NaElement('license-v2-add')
+ codes = netapp_utils.zapi.NaElement('codes')
+ for code in self.license_codes:
+ codes.add_new_child('license-code-v2', str(code.strip().lower()))
+ license_add.add_child_elem(codes)
+ try:
+ self.server.invoke_successfully(license_add,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error adding licenses: %s" %
+ to_native(error), exception=traceback.format_exc())
+
+ def apply(self):
+ '''Call add, delete or modify methods'''
+ changed = False
+ create_license = False
+ remove_license = False
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_license", cserver)
+ # Add / Update licenses.
+ license_status = self.get_licensing_status()
+
+ if self.state == 'absent': # delete
+ changed = True
+ else: # add or update
+ if self.license_codes is not None:
+ create_license = True
+ changed = True
+ if self.remove_unused is not None:
+ remove_license = True
+ changed = True
+ if self.remove_expired is not None:
+ remove_license = True
+ changed = True
+ if changed and not self.module.check_mode:
+ if self.state == 'present': # execute create
+ if create_license:
+ self.add_licenses()
+ if self.remove_unused is not None:
+ self.remove_unused_licenses()
+ if self.remove_expired is not None:
+ self.remove_expired_licenses()
+ # not able to detect that a new license is required until we try to install it.
+ if create_license or remove_license:
+ new_license_status = self.get_licensing_status()
+ if local_cmp(license_status, new_license_status) == 0:
+ changed = False
+ else: # execute delete
+ license_deleted = False
+ # not able to detect which license is required to delete until we try it.
+ for package in self.license_names:
+ license_deleted |= self.remove_licenses(package)
+ changed = license_deleted
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ '''Apply license operations'''
+ obj = NetAppOntapLicense()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py
new file mode 100644
index 00000000..d68030fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_login_messages
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_login_messages
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.1.0'
+short_description: Set up the login banner and message of the day
+description:
+  - This module allows you to manage the login banner and message of the day (motd) for a vserver.
+options:
+ banner:
+ description:
+ - Login banner Text message.
+ type: str
+ vserver:
+ description:
+    - The name of the SVM that the login messages should be set for.
+ required: true
+ type: str
+ motd_message:
+ description:
+ - MOTD Text message.
+ type: str
+ aliases:
+ - message
+ show_cluster_motd:
+ description:
+ - Set to I(false) if Cluster-level Message of the Day should not be shown
+ type: bool
+ default: True
+'''
+
+EXAMPLES = """
+
+ - name: modify banner vserver
+ na_ontap_login_messages:
+ vserver: trident_svm
+ banner: this is trident vserver
+      username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+ - name: modify motd vserver
+ na_ontap_login_messages:
+ vserver: trident_svm
+ motd_message: this is trident vserver
+ show_cluster_motd: True
+      username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
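+
+  # Illustrative sketch, not part of the original module examples: banner and
+  # motd_message are both documented options and can be set in one task.
+  - name: modify banner and motd together
+    na_ontap_login_messages:
+      vserver: trident_svm
+      banner: this is trident vserver
+      motd_message: this is trident vserver
+      username: "{{ username }}"
+      password: "{{ password }}"
+      hostname: "{{ hostname }}"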
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLoginMessages(object):
+ """
+ modify and delete login banner and motd
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ vserver=dict(required=True, type='str'),
+ banner=dict(required=False, type='str'),
+ motd_message=dict(required=False, type='str', aliases=['message']),
+ show_cluster_motd=dict(default=True, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['show_cluster_motd', 'banner', 'motd_message']]
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_banner_motd(self, uuid=None):
+ if self.use_rest:
+ api = 'security/login/messages/' + uuid
+ params = {
+ 'fields': '*'
+ }
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg='Error when fetching login_banner info: %s' % error)
+ return_result = dict()
+ return_result['banner'] = message['banner'].rstrip() if message.get('banner') else ''
+ return_result['motd_message'] = message['message'].rstrip() if message.get('message') else ''
+ if message.get('show_cluster_message'):
+ return_result['show_cluster_message'] = message['show_cluster_message']
+ return return_result
+ else:
+ login_banner_get_iter = netapp_utils.zapi.NaElement('vserver-login-banner-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info')
+ login_banner_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(login_banner_info)
+ login_banner_get_iter.add_child_elem(query)
+ return_result = dict()
+ try:
+ result = self.server.invoke_successfully(login_banner_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching login_banner info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ login_banner_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'vserver-login-banner-info')
+ return_result['banner'] = login_banner_info.get_child_content('message')
+ return_result['banner'] = str(return_result['banner']).rstrip()
+ # if the message is '-' that means the banner doesn't exist.
+ if return_result['banner'] == '-' or return_result['banner'] == 'None':
+ return_result['banner'] = ''
+
+ motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ motd_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'vserver-motd-info')
+ return_result['motd_message'] = motd_info.get_child_content('message')
+ return_result['motd_message'] = str(return_result['motd_message']).rstrip()
+ return_result['show_cluster_motd'] = True if motd_info.get_child_content(
+ 'is-cluster-message-enabled') == 'true' else False
+ if return_result['motd_message'] == 'None':
+ return_result['motd_message'] = ''
+ return return_result
+
+ def modify_banner(self, modify, uuid):
+ if self.use_rest:
+ api = 'security/login/messages/' + uuid
+ params = {
+ "banner": modify['banner']
+ }
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg='Error when modifying banner: %s' % error)
+ else:
+ login_banner_modify = netapp_utils.zapi.NaElement('vserver-login-banner-modify-iter')
+ login_banner_modify.add_new_child('message', modify['banner'])
+ query = netapp_utils.zapi.NaElement('query')
+ login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info')
+ login_banner_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(login_banner_info)
+ login_banner_modify.add_child_elem(query)
+ try:
+ self.server.invoke_successfully(login_banner_modify, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as err:
+ self.module.fail_json(msg="Error modifying login_banner: %s" % (to_native(err)),
+ exception=traceback.format_exc())
+
+ def modify_motd(self, modify, uuid):
+ if self.use_rest:
+ api = 'security/login/messages/' + uuid
+ params = {
+ 'message': modify['motd_message'],
+ }
+ if modify.get('show_cluster_motd'):
+ params['show_cluster_message'] = modify['show_cluster_motd']
+ dummy, error = self.rest_api.patch(api, params)
+ if error:
+ self.module.fail_json(msg='Error when modifying motd: %s' % error)
+ else:
+ motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter')
+ if modify.get('motd_message') is not None:
+ motd_create.add_new_child('message', modify['motd_message'])
+ if modify.get('show_cluster_motd') is not None:
+ motd_create.add_new_child('is-cluster-message-enabled', 'true' if modify['show_cluster_motd'] is True else 'false')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_create.add_child_elem(query)
+ try:
+ self.server.invoke_successfully(motd_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as err:
+ self.module.fail_json(msg="Error modifying motd: %s" % (to_native(err)),
+ exception=traceback.format_exc())
+
+ def get_svm_uuid(self):
+ """
+ Get a svm's uuid
+ :return: uuid of the svm
+ """
+ params = {'name': self.parameters['vserver'],
+ 'fields': 'uuid'
+ }
+ api = 'svm/svms'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ if message['num_records'] == 0:
+ self.module.fail_json(msg="Error fetching specified vserver. Please make sure vserver name is correct. For cluster vserver, Please use ZAPI.")
+ return message['records'][0]['uuid']
+
+ def apply(self):
+ uuid = None
+ modify = None
+ if self.use_rest:
+ uuid = self.get_svm_uuid()
+ else:
+ netapp_utils.ems_log_event("na_ontap_login_banner", self.server)
+
+ current = self.get_banner_motd(uuid=uuid)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if modify.get('banner') is not None:
+ self.modify_banner(modify, uuid=uuid)
+ if modify.get('show_cluster_motd') is not None or modify.get('motd_message') is not None:
+ self.modify_motd(modify, uuid=uuid)
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Execute action from playbook'''
+ messages_obj = NetAppOntapLoginMessages()
+ messages_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py
new file mode 100644
index 00000000..8f9b5abb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py
@@ -0,0 +1,757 @@
+#!/usr/bin/python
+
+# (c) 2017-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_lun
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_lun
+
+short_description: NetApp ONTAP manage LUNs
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, destroy, resize LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the LUN to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - The name of the LUN to be renamed.
+ type: str
+ version_added: 20.12.0
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the LUN should exist on.
+ - Required if san_application_template is not present.
+ - Not allowed if san_application_template is present.
+ type: str
+
+ size:
+ description:
+ - The size of the LUN in C(size_unit).
+ - Required when C(state=present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ force_resize:
+ description:
+ Forcibly reduce the size. This must be set to true when reducing the size of a LUN, as a
+ safeguard against accidentally shrinking it.
+ type: bool
+ default: false
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ type: bool
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
+ type: bool
+ default: false
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+ type: str
+
+ os_type:
+ description:
+ - The os type for the LUN.
+ type: str
+ aliases: ['ostype']
+
+ qos_policy_group:
+ description:
+ - The QoS policy group to be set on the LUN.
+ type: str
+ version_added: 20.12.0
+
+ space_reserve:
+ description:
+ - This can be set to "false" which will create a LUN without any space being reserved.
+ type: bool
+ default: True
+
+ space_allocation:
+ description:
+ - This enables support for the SCSI Thin Provisioning features. If the host and file system do
+ not support this feature, do not enable it.
+ type: bool
+ default: False
+ version_added: 2.7.0
+
+ use_exact_size:
+ description:
+ - This can be set to "False", which will round the size of LUNs >= 450g.
+ type: bool
+ default: True
+ version_added: 20.11.0
+
+ san_application_template:
+ description:
+ - additional options when using the application/applications REST API to create LUNs.
+ - the module is using ZAPI by default, and switches to REST if any suboption is present.
+ - create one or more LUNs (and the associated volume as needed).
+ - only creation or deletion of a SAN application is supported. Changes are ignored.
+ - operations at the LUN level are supported; they require knowing the LUN short name.
+ - this requires ONTAP 9.6 or higher.
+ type: dict
+ version_added: 20.12.0
+ suboptions:
+ name:
+ description: name of the SAN application.
+ type: str
+ required: True
+ igroup_name:
+ description: name of the initiator group through which the contents of this application will be accessed.
+ type: str
+ lun_count:
+ description: number of LUNs in the application component (1 to 32).
+ type: int
+ protection_type:
+ description:
+ - The snapshot policy for the volume supporting the LUNs.
+ type: dict
+ suboptions:
+ local_policy:
+ description:
+ - The snapshot copy policy for the volume.
+ type: str
+ storage_service:
+ description:
+ - The performance service level (PSL) for this volume.
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy (see C(tiering_policy) for a more complete description).
+ type: dict
+ suboptions:
+ control:
+ description: Storage tiering placement rules for the container.
+ choices: ['required', 'best_effort', 'disallowed']
+ type: str
+ policy:
+ description:
+ - Cloud tiering policy (see C(tiering_policy)).
+ - Must match C(tiering_policy) if both are present.
+ choices: ['snapshot-only', 'auto', 'backup', 'none']
+ type: str
+ object_stores:
+ description: list of object store names for tiering.
+ type: list
+ elements: str
+ use_san_application:
+ description:
+ - Whether to use the application/applications REST/API to create LUNs.
+ - This will default to true if any other suboption is present.
+ type: bool
+ default: true
+
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ os_type: linux
+ space_reserve: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize LUN
+ na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: True
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Create LUNs using SAN application
+ tags: create
+ na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ size: 15
+ size_unit: mb
+ os_type: linux
+ space_reserve: false
+ san_application_template:
+ name: san-ansibleLUN
+ igroup_name: testme_igroup
+ lun_count: 3
+ protection_type:
+ local_policy: default
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
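+
+# The following task is a hedged sketch, not part of the original examples: it renames an
+# existing LUN in place using the from_name option. All names and credentials are placeholders.
+- name: Rename LUN (sketch)
+  na_ontap_lun:
+    state: present
+    name: ansibleLUN_renamed
+    from_name: ansibleLUN
+    flexvol_name: ansibleVolume
+    vserver: ansibleVServer
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"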
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUN(object):
+ ''' create, modify, delete LUN '''
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ force_resize=dict(default=False, type='bool'),
+ force_remove=dict(default=False, type='bool'),
+ force_remove_fenced=dict(default=False, type='bool'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ os_type=dict(required=False, type='str', aliases=['ostype']),
+ qos_policy_group=dict(required=False, type='str'),
+ space_reserve=dict(required=False, type='bool', default=True),
+ space_allocation=dict(required=False, type='bool', default=False),
+ use_exact_size=dict(required=False, type='bool', default=True),
+ san_application_template=dict(type='dict', options=dict(
+ use_san_application=dict(type='bool', default=True),
+ name=dict(required=True, type='str'),
+ igroup_name=dict(type='str'),
+ lun_count=dict(type='int'),
+ protection_type=dict(type='dict', options=dict(
+ local_policy=dict(type='str'),
+ )),
+ storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
+ tiering=dict(type='dict', options=dict(
+ control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
+ policy=dict(type='str', choices=['snapshot-only', 'auto', 'backup', 'none']),
+ object_stores=dict(type='list', elements='str') # create only
+ )),
+ ))
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up state variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('size') is not None:
+ self.parameters['size'] *= netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ # REST API for application/applications if needed
+ self.rest_api, self.rest_app = self.setup_rest_application()
+
+ def setup_rest_application(self):
+ use_application_template = self.na_helper.safe_get(self.parameters, ['san_application_template', 'use_san_application'])
+ rest_api, rest_app = None, None
+ if use_application_template:
+ if self.parameters.get('flexvol_name') is not None:
+ self.module.fail_json(msg="'flexvol_name' option is not supported when san_application_template is present")
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ name = self.na_helper.safe_get(self.parameters, ['san_application_template', 'name'], allow_sparse_dict=False)
+ rest_app = RestApplication(rest_api, self.parameters['vserver'], name)
+ elif self.parameters.get('flexvol_name') is None:
+ self.module.fail_json(msg="flexvol_name option is required when san_application_template is not present")
+ return rest_api, rest_app
+
+ def get_luns(self, lun_path=None):
+ """
+ Return list of LUNs matching vserver and volume names.
+
+ :return: list of LUNs in XML format.
+ :rtype: list
+ """
+ luns = []
+ tag = None
+ if lun_path is None and self.parameters.get('flexvol_name') is None:
+ return luns
+
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('vserver', self.parameters['vserver'])
+ if lun_path is not None:
+ query_details.add_new_child('lun_path', lun_path)
+ else:
+ query_details.add_new_child('volume', self.parameters['flexvol_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ while True:
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ lun_info.add_child_elem(query)
+ if tag:
+ lun_info.add_new_child('tag', tag, True)
+
+ result = self.server.invoke_successfully(lun_info, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attr_list = result.get_child_by_name('attributes-list')
+ luns.extend(attr_list.get_children())
+ tag = result.get_child_content('next-tag')
+ if tag is None:
+ break
+ return luns
+
+ def get_lun_details(self, lun):
+ """
+ Extract LUN details, from XML to python dict
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ return_value = dict()
+ return_value['size'] = int(lun.get_child_content('size'))
+ bool_attr_map = {
+ 'is-space-alloc-enabled': 'space_allocation',
+ 'is-space-reservation-enabled': 'space_reserve'
+ }
+ for attr in bool_attr_map:
+ value = lun.get_child_content(attr)
+ if value is not None:
+ return_value[bool_attr_map[attr]] = self.na_helper.get_value_for_bool(True, value)
+ str_attr_map = {
+ 'name': 'name',
+ 'path': 'path',
+ 'qos-policy-group': 'qos_policy_group',
+ 'multiprotocol-type': 'os_type'
+ }
+ for attr in str_attr_map:
+ value = lun.get_child_content(attr)
+ if value is not None:
+ return_value[str_attr_map[attr]] = value
+
+ # Find out if the lun is attached
+ attached_to = None
+ lun_id = None
+ if lun.get_child_content('mapped') == 'true':
+ lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-map-list-info', **{'path': lun.get_child_content('path')})
+ result = self.server.invoke_successfully(
+ lun_map_list, enable_tunneling=True)
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ igroup = igroup_info.get_child_content(
+ 'initiator-group-name')
+ attached_to = igroup
+ lun_id = igroup_info.get_child_content('lun-id')
+
+ return_value.update({
+ 'attached_to': attached_to,
+ 'lun_id': lun_id
+ })
+ return return_value
+
+ def find_lun(self, luns, name, lun_path=None):
+ """
+ Return lun record matching name or path
+
+ :return: lun record
+ :rtype: XML or None if not found
+ """
+ for lun in luns:
+ path = lun.get_child_content('path')
+ if lun_path is not None:
+ if lun_path == path:
+ return lun
+ else:
+ if name == path:
+ return lun
+ _rest, _splitter, found_name = path.rpartition('/')
+ if found_name == name:
+ return lun
+ return None
+
+ def get_lun(self, name, lun_path=None):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ luns = self.get_luns(lun_path)
+ lun = self.find_lun(luns, name, lun_path)
+ if lun is not None:
+ return self.get_lun_details(lun)
+ return None
+
+ def get_luns_from_app(self):
+ app_details, error = self.rest_app.get_application_details()
+ self.fail_on_error(error)
+ if app_details is not None:
+ app_details['paths'] = self.get_lun_paths_from_app()
+ return app_details
+
+ def get_lun_paths_from_app(self):
+ """Get luns path for SAN application"""
+ backing_storage, error = self.rest_app.get_application_component_backing_storage()
+ self.fail_on_error(error)
+ # {'luns': [{'path': '/vol/ansibleLUN/ansibleLUN_1', ...
+ if backing_storage is not None:
+ return [lun['path'] for lun in backing_storage.get('luns', [])]
+ return None
+
+ def get_lun_path_from_backend(self, name):
+ """returns lun path matching name if found in backing_storage
+ returns None if not found
+ """
+ lun_paths = self.get_lun_paths_from_app()
+ match = "/%s" % name
+ for path in lun_paths:
+ if path.endswith(match):
+ return path
+ return None
+
+ def create_san_app_component(self):
+ '''Create SAN application component'''
+ required_options = ('name', 'size')
+ for option in required_options:
+ if self.parameters.get(option) is None:
+ self.module.fail_json(msg='Error: "%s" is required to create san application.' % option)
+
+ application_component = dict(
+ name=self.parameters['name'],
+ total_size=self.parameters['size'],
+ lun_count=1 # default value, may be overridden below
+ )
+ for attr in ('igroup_name', 'lun_count', 'storage_service'):
+ value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if value is not None:
+ application_component[attr] = value
+ for attr in ('os_type', 'qos_policy_group'):
+ value = self.na_helper.safe_get(self.parameters, [attr])
+ if value is not None:
+ if attr == 'qos_policy_group':
+ attr = 'qos'
+ value = dict(policy=dict(name=value))
+ application_component[attr] = value
+ tiering = self.na_helper.safe_get(self.parameters, ['san_application_template', 'tiering'])
+ if tiering is not None:
+ application_component['tiering'] = dict()
+ for attr in ('control', 'policy', 'object_stores'):
+ value = tiering.get(attr)
+ if attr == 'object_stores' and value is not None:
+ value = [dict(name=x) for x in value]
+ if value is not None:
+ application_component['tiering'][attr] = value
+ return application_component
+
+ def create_san_app_body(self):
+ '''Create body for san template'''
+ # TODO:
+ # Should we support new_igroups?
+ # It may raise idempotency issues if the REST call fails if the igroup already exists.
+ # And we already have na_ontap_igroups.
+ san = {
+ 'application_components': [self.create_san_app_component()],
+ }
+ for attr in ('protection_type',):
+ value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if value is not None:
+ # we expect value to be a dict, but maybe an empty dict
+ value = self.na_helper.filter_out_none_entries(value)
+ if value:
+ san[attr] = value
+ for attr in ('os_type',):
+ value = self.na_helper.safe_get(self.parameters, [attr])
+ if value is not None:
+ san[attr] = value
+ body, error = self.rest_app.create_application_body('san', san)
+ return body, error
+
+ def create_san_application(self):
+ '''Use REST application/applications san template to create one or more LUNs'''
+ body, error = self.create_san_app_body()
+ self.fail_on_error(error)
+ dummy, error = self.rest_app.create_application(body)
+ self.fail_on_error(error)
+
+ def delete_san_application(self):
+ '''Use REST application/applications san template to delete one or more LUNs'''
+ dummy, error = self.rest_app.delete_application()
+ self.fail_on_error(error)
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
+ options = {'path': path,
+ 'size': str(self.parameters['size']),
+ 'space-reservation-enabled': str(self.parameters['space_reserve']),
+ 'space-allocation-enabled': str(self.parameters['space_allocation']),
+ 'use-exact-size': str(self.parameters['use_exact_size'])}
+ if self.parameters.get('os_type') is not None:
+ options['ostype'] = self.parameters['os_type']
+ if self.parameters.get('qos_policy_group') is not None:
+ options['qos-policy-group'] = self.parameters['qos_policy_group']
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **options)
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s"
+ % (self.parameters['name'], self.parameters['size'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self, path):
+ """
+ Delete requested LUN
+ """
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.parameters['force_remove']),
+ 'destroy-fenced-lun':
+ str(self.parameters['force_remove_fenced'])})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self, path):
+ """
+ Resize requested LUN.
+
+ :return: True if LUN was actually re-sized, false otherwise.
+ :rtype: bool
+ """
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.parameters['size']),
+ 'force': str(self.parameters['force_resize'])})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ if to_native(exc.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def set_lun_value(self, path, key, value):
+ key_to_zapi = dict(
+ qos_policy_group=('lun-set-qos-policy-group', 'qos-policy-group'),
+ space_allocation=('lun-set-space-alloc', 'enable'),
+ space_reserve=('lun-set-space-reservation-info', 'enable')
+ )
+ if key in key_to_zapi:
+ zapi, option = key_to_zapi[key]
+ else:
+ self.module.fail_json(msg="option %s cannot be modified to %s" % (key, value))
+ options = dict(path=path)
+ if option == 'enable':
+ options[option] = self.na_helper.get_value_for_bool(False, value)
+ else:
+ options[option] = value
+
+ lun_set = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(lun_set, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error setting lun option %s: %s" % (key, to_native(exc)),
+ exception=traceback.format_exc())
+ return
+
+ def modify_lun(self, path, modify):
+ """
+ update LUN properties (except size or name)
+ """
+ for key, value in modify.items():
+ self.set_lun_value(path, key, value)
+
+ def rename_lun(self, path, new_path):
+ """
+ rename LUN
+ """
+ lun_move = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-move', **{'path': path,
+ 'new-path': new_path})
+ try:
+ self.server.invoke_successfully(lun_move, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error moving lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def fail_on_error(self, error, stack=False):
+ if error is None:
+ return
+ elements = dict(msg="Error: %s" % error)
+ if stack:
+ elements['stack'] = traceback.format_stack()
+ self.module.fail_json(**elements)
+
+ def apply(self):
+ results = dict()
+ warnings = list()
+ netapp_utils.ems_log_event("na_ontap_lun", self.server)
+ app_cd_action = None
+ if self.rest_app:
+ app_current, error = self.rest_app.get_application_uuid()
+ self.fail_on_error(error)
+ app_cd_action = self.na_helper.get_cd_action(app_current, self.parameters)
+ if app_cd_action == 'create' and self.parameters.get('size') is None:
+ self.module.fail_json(msg="size is a required parameter for create.")
+
+ # For LUNs created using a SAN application, we're getting lun paths from the backing storage
+ lun_path, from_lun_path = None, None
+ from_name = self.parameters.get('from_name')
+ if self.rest_app and app_cd_action is None and app_current:
+ lun_path = self.get_lun_path_from_backend(self.parameters['name'])
+ if from_name is not None:
+ from_lun_path = self.get_lun_path_from_backend(from_name)
+
+ if app_cd_action is None:
+ # actions at LUN level
+ current = self.get_lun(self.parameters['name'], lun_path)
+ if current is not None and lun_path is None:
+ lun_path = current['path']
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify, rename = None, None
+ if cd_action == 'create' and from_name is not None:
+ # create by renaming existing LUN, if it really exists
+ old_lun = self.get_lun(from_name, from_lun_path)
+ rename = self.na_helper.is_rename_action(old_lun, current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming lun: %s does not exist" % from_name)
+ if rename:
+ current = old_lun
+ if from_lun_path is None:
+ from_lun_path = current['path']
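+ # Build the new path by replacing the trailing short name: from_name must be the last
+ # component of from_lun_path, otherwise the module fails with the mismatch error below.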
+ head, _sep, tail = from_lun_path.rpartition(from_name)
+ if tail:
+ self.module.fail_json(msg="Error renaming lun: %s does not match lun_path %s" % (from_name, from_lun_path))
+ lun_path = head + self.parameters['name']
+ results['renamed'] = True
+ cd_action = None
+ if cd_action == 'create' and self.parameters.get('size') is None:
+ self.module.fail_json(msg="size is a required parameter for create.")
+ if cd_action is None and self.parameters['state'] == 'present':
+ # we already handled rename if required
+ current.pop('name', None)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ results['modify'] = dict(modify)
+ if cd_action and self.rest_app and app_cd_action is None and app_current:
+ msg = 'This module does not support %s a LUN by name %s a SAN application.' %\
+ (('adding', 'to') if cd_action == 'create' else ('removing', 'from'))
+ warnings.append(msg)
+ cd_action = None
+ self.na_helper.changed = False
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if app_cd_action == 'create':
+ self.create_san_application()
+ elif app_cd_action == 'delete':
+ self.rest_app.delete_application()
+ elif cd_action == 'create':
+ self.create_lun()
+ elif cd_action == 'delete':
+ self.delete_lun(lun_path)
+ else:
+ if rename:
+ self.rename_lun(from_lun_path, lun_path)
+ size_changed = False
+ if modify and 'size' in modify:
+ # Ensure that size was actually changed. Please
+ # read notes in 'resize_lun' function for details.
+ size_changed = self.resize_lun(lun_path)
+ modify.pop('size')
+ if modify:
+ self.modify_lun(lun_path, modify)
+ if not modify and not rename:
+ # size may not have changed
+ self.na_helper.changed = size_changed
+
+ results['changed'] = self.na_helper.changed
+ self.module.exit_json(**results)
+
+
+def main():
+ lun = NetAppOntapLUN()
+ lun.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py
new file mode 100644
index 00000000..e55b663c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_lun_copy
+
+short_description: NetApp ONTAP copy LUNs
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Copy LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+
+ destination_vserver:
+ description:
+ - the name of the Vserver that will host the new LUN.
+ required: true
+ type: str
+
+ destination_path:
+ description:
+ - Specifies the full path to the new LUN.
+ required: true
+ type: str
+
+ source_path:
+ description:
+ - Specifies the full path to the source LUN.
+ required: true
+ type: str
+
+ source_vserver:
+ description:
+ - Specifies the name of the vserver hosting the LUN to be copied.
+ type: str
+
+ '''
+EXAMPLES = """
+- name: Copy LUN
+ na_ontap_lun_copy:
+ destination_vserver: ansible
+ destination_path: /vol/test/test_copy_dest_dest_new
+ source_path: /vol/test/test_copy_1
+ source_vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
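+
+# The following task is a hedged sketch, not part of the original examples: it copies a LUN
+# between two different vservers by setting source_vserver and destination_vserver separately.
+# All paths, vserver names, and credentials are placeholders.
+- name: Copy LUN across vservers (sketch)
+  na_ontap_lun_copy:
+    destination_vserver: ansible_dest
+    destination_path: /vol/test_dest/test_copy_dest
+    source_path: /vol/test/test_copy_1
+    source_vserver: ansible_src
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"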
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUNCopy(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ destination_vserver=dict(required=True, type='str'),
+ destination_path=dict(required=True, type='str'),
+ source_path=dict(required=True, type='str'),
+ source_vserver=dict(required=False, type='str'),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['destination_vserver'])
+
+ def get_lun(self):
+ """
+ Check if the LUN exists
+
+ :return: true if it exists, false otherwise
+ :rtype: bool
+ """
+
+ return_value = False
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+
+ query_details.add_new_child('path', self.parameters['destination_path'])
+ query_details.add_new_child('vserver', self.parameters['destination_vserver'])
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_info.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(lun_info, True)
+
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error getting lun info %s for verver %s: %s" %
+ (self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return_value = True
+ return return_value
+
+ def copy_lun(self):
+ """
+ Copy LUN with requested path and vserver
+ """
+ lun_copy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-copy-start', **{'source-vserver': self.parameters['source_vserver']})
+
+ path_obj = netapp_utils.zapi.NaElement('paths')
+ pair = netapp_utils.zapi.NaElement('lun-path-pair')
+ pair.add_new_child('destination-path', self.parameters['destination_path'])
+ pair.add_new_child('source-path', self.parameters['source_path'])
+ path_obj.add_child_elem(pair)
+ lun_copy.add_child_elem(path_obj)
+
+ try:
+ self.server.invoke_successfully(lun_copy, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
+ (self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+
+ netapp_utils.ems_log_event("na_ontap_lun_copy", self.server)
+ if self.get_lun(): # lun already exists at destination
+ changed = False
+ else:
+ changed = True
+ if self.module.check_mode:
+ pass
+ else:
+ # need to copy lun
+ if self.parameters['state'] == 'present':
+ self.copy_lun()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppOntapLUNCopy()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py
new file mode 100644
index 00000000..b1ee175f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+
+""" this is lun mapping module
+
+ (c) 2018-2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+
+module: na_ontap_lun_map
+
+short_description: NetApp ONTAP LUN maps
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Map and unmap LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ initiator_group_name:
+ description:
+ - Initiator group to map to the given LUN.
+ required: true
+ type: str
+
+ path:
+ description:
+ - Path of the LUN.
+ required: true
+ type: str
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+ type: str
+
+ lun_id:
+ description:
+ - LUN ID assigned for the map.
+ type: str
+
+
+"""
+
+EXAMPLES = """
+- name: Create LUN mapping
+ na_ontap_lun_map:
+ state: present
+ initiator_group_name: ansibleIgroup3234
+ path: /vol/iscsi_path/iscsi_lun
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Unmap LUN
+ na_ontap_lun_map:
+ state: absent
+ initiator_group_name: ansibleIgroup3234
+ path: /vol/iscsi_path/iscsi_lun
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
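+
+# The following task is a hedged sketch, not part of the original examples: it requests a
+# specific LUN ID for the mapping via the lun_id option. Names and credentials are placeholders.
+- name: Create LUN mapping with an explicit LUN ID (sketch)
+  na_ontap_lun_map:
+    state: present
+    initiator_group_name: ansibleIgroup3234
+    path: /vol/iscsi_path/iscsi_lun
+    lun_id: 1
+    vserver: ci_dev
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"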
+"""
+
+RETURN = """
+lun_node:
+ description: NetApp controller that is hosting the LUN.
+ returned: success
+ type: str
+ sample: node01
+lun_ostype:
+ description: Specifies the OS of the host accessing the LUN.
+ returned: success
+ type: str
+ sample: vmware
+lun_serial:
+ description: A unique, 12-byte, ASCII string used to identify the LUN.
+ returned: success
+ type: str
+ sample: 80E7/]LZp1Tt
+lun_naa_id:
+ description: The Network Address Authority (NAA) identifier for the LUN.
+ returned: success
+ type: str
+ sample: 600a0980383045372f5d4c5a70315474
+lun_state:
+ description: Online or offline status of the LUN.
+ returned: success
+ type: str
+ sample: online
+lun_size:
+ description: Size of the LUN in bytes.
+ returned: success
+ type: int
+ sample: 2199023255552
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import codecs
+from ansible.module_utils._text import to_text, to_bytes
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUNMap(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ initiator_group_name=dict(required=True, type='str'),
+ path=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ lun_id=dict(required=False, type='str', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['path'])
+ ],
+ supports_check_mode=True
+ )
+
+ self.result = dict(
+ changed=False,
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.initiator_group_name = p['initiator_group_name']
+ self.path = p['path']
+ self.vserver = p['vserver']
+ self.lun_id = p['lun_id']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_lun_map(self):
+ """
+ Return details about the LUN map
+
+ :return: Details about the lun map
+ :rtype: dict
+ """
+ lun_info = netapp_utils.zapi.NaElement('lun-map-list-info')
+ lun_info.add_new_child('path', self.path)
+ result = self.server.invoke_successfully(lun_info, True)
+ return_value = None
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ initiator_group_name = igroup_info.get_child_content('initiator-group-name')
+ lun_id = igroup_info.get_child_content('lun-id')
+ if initiator_group_name == self.initiator_group_name:
+ return_value = {
+ 'lun_id': lun_id
+ }
+ break
+
+ return return_value
+
+ def get_lun(self):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ # build the lun query
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('path', self.path)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_query = netapp_utils.zapi.NaElement('lun-get-iter')
+ lun_query.add_child_elem(query)
+
+ # find lun using query
+ result = self.server.invoke_successfully(lun_query, True)
+ return_value = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ lun = result.get_child_by_name('attributes-list').get_child_by_name('lun-info')
+
+ # extract and assign lun information to return value
+ hexlify = codecs.getencoder('hex')
+ naa_hex = to_text(hexlify(to_bytes(lun.get_child_content('serial-number')))[0])
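+ # Hedged illustration using the RETURN samples above: serial '80E7/]LZp1Tt' hex-encodes to
+ # '383045372f5d4c5a70315474', so lun_naa_id becomes '600a0980383045372f5d4c5a70315474'
+ # (the fixed '600a0980' prefix followed by the hex-encoded serial number).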
+ return_value = {
+ 'lun_node': lun.get_child_content('node'),
+ 'lun_ostype': lun.get_child_content('multiprotocol-type'),
+ 'lun_serial': lun.get_child_content('serial-number'),
+ 'lun_naa_id': '600a0980' + naa_hex,
+ 'lun_state': lun.get_child_content('state'),
+ 'lun_size': lun.get_child_content('size'),
+ }
+
+ return return_value
+
+ def create_lun_map(self):
+ """
+ Create LUN map
+ """
+ options = {'path': self.path, 'initiator-group': self.initiator_group_name}
+ if self.lun_id is not None:
+ options['lun-id'] = self.lun_id
+ lun_map_create = netapp_utils.zapi.NaElement.create_node_with_children('lun-map', **options)
+
+ try:
+ self.server.invoke_successfully(lun_map_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error mapping lun %s of initiator_group_name %s: %s" %
+ (self.path, self.initiator_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_lun_map(self):
+ """
+ Unmap LUN map
+ """
+ lun_map_delete = netapp_utils.zapi.NaElement.create_node_with_children('lun-unmap', **{'path': self.path, 'initiator-group': self.initiator_group_name})
+
+ try:
+ self.server.invoke_successfully(lun_map_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error unmapping lun %s of initiator_group_name %s: %s" %
+ (self.path, self.initiator_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_lun_map", self.server)
+ lun_details = self.get_lun()
+ lun_map_details = self.get_lun_map()
+
+ if self.state == 'present' and lun_details:
+ self.result.update(lun_details)
+
+ if self.state == 'present' and not lun_map_details:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.create_lun_map()
+ elif self.state == 'absent' and lun_map_details:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.delete_lun_map()
+
+ self.module.exit_json(**self.result)
+
+
+def main():
+ v = NetAppOntapLUNMap()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py
new file mode 100644
index 00000000..415d6633
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module implements the operations for ONTAP MCC Mediator.
+# The Mediator is supported for MCC IP configs from ONTAP 9.7 or later.
+# This module requires REST APIs for Mediator which is supported from
+# ONTAP 9.8 (DW) or later
+
+'''
+na_ontap_mcc_mediator
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_mcc_mediator
+short_description: NetApp ONTAP Add and Remove MetroCluster Mediator
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add and remove ONTAP MCC Mediator
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether MCCIP Mediator is present or not."
+ default: present
+ type: str
+
+ mediator_address:
+ description:
+ - ip address of the mediator
+ type: str
+ required: true
+
+ mediator_user:
+ description:
+ - username of the mediator
+ type: str
+ required: true
+
+ mediator_password:
+ description:
+ - password of the mediator
+ type: str
+ required: true
+
+'''
+
+EXAMPLES = """
+ - name: Add ONTAP MCCIP Mediator
+ na_ontap_mcc_mediator:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ mediator_address: mediator_ip
+ mediator_user: metrocluster_admin
+ mediator_password: netapp1!
+
+ - name: Delete ONTAP MCCIP Mediator
+ na_ontap_mcc_mediator:
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ mediator_address: mediator_ip
+ mediator_user: metrocluster_admin
+ mediator_password: netapp1!
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppOntapMccipMediator(object):
+ """
+ Mediator object for Add/Remove/Display
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ mediator_address=dict(required=True, type='str'),
+ mediator_user=dict(required=True, type='str'),
+ mediator_password=dict(required=True, type='str', no_log=True),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_mcc_mediator'))
+
+ def add_mediator(self):
+ """
+ Adds an ONTAP Mediator to MCC configuration
+ """
+ api = 'cluster/mediators'
+ params = {
+ 'ip_address': self.parameters['mediator_address'],
+ 'password': self.parameters['mediator_password'],
+ 'user': self.parameters['mediator_user']
+ }
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def remove_mediator(self, current_uuid):
+ """
+ Removes the ONTAP Mediator from MCC configuration
+ """
+ api = 'cluster/mediators/%s' % current_uuid
+ params = {
+ 'ip_address': self.parameters['mediator_address'],
+ 'password': self.parameters['mediator_password'],
+ 'user': self.parameters['mediator_user'],
+ 'uuid': current_uuid
+ }
+ dummy, error = self.rest_api.delete(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def get_mediator(self):
+ """
+ Determine if the MCC configuration has added an ONTAP Mediator
+ """
+ api = "cluster/mediators"
+ message, error = self.rest_api.get(api, None)
+ if error:
+ self.module.fail_json(msg=error)
+ if message['num_records'] > 0:
+ return message['records'][0]['uuid']
+ return None
+
+ def apply(self):
+ """
+ Apply action to MCC Mediator
+ """
+ current = self.get_mediator()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.add_mediator()
+ elif cd_action == 'delete':
+ self.remove_mediator(current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Add, Remove and display ONTAP MCC Mediator
+ """
+ mediator_obj = NetAppOntapMccipMediator()
+ mediator_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py
new file mode 100644
index 00000000..16344452
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+"""
+(c) 2020, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: na_ontap_metrocluster
+short_description: NetApp ONTAP set up a MetroCluster
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.9.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+requirements:
+ - ONTAP >= 9.8
+
+description:
+ - Configure MetroCluster.
+options:
+ state:
+ choices: ['present']
+ description:
+ - Present to set up a MetroCluster
+ default: present
+ type: str
+ dr_pairs:
+ description: disaster recovery pair
+ type: list
+ required: true
+ elements: dict
+ suboptions:
+ node_name:
+ description:
+ - the name of the main node
+ required: true
+ type: str
+ partner_node_name:
+ description:
+ - the name of the main partner node
+ required: true
+ type: str
+ partner_cluster_name:
+ description:
+ - The name of the partner Cluster
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+-
+ name: Manage MetroCluster
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: True
+ validate_certs: False
+ tasks:
+ - name: Create MetroCluster
+ na_ontap_metrocluster:
+ <<: *login
+ dr_pairs:
+ - partner_node_name: rha17-a2
+ node_name: rha17-b2
+ partner_cluster_name: rha2-b2b1_siteB
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPMetroCluster(object):
+ ''' ONTAP metrocluster operations '''
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(choices=['present'], default='present'),
+ dr_pairs=dict(required=True, type='list', elements='dict', options=dict(
+ node_name=dict(required=True, type='str'),
+ partner_node_name=dict(required=True, type='str')
+ )),
+ partner_cluster_name=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_metrocluster'))
+
+ def get_metrocluster(self):
+ attrs = None
+ api = 'cluster/metrocluster'
+ options = {'fields': '*'}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if message is not None:
+ local = message['local']
+ if local['configuration_state'] != "not_configured":
+ attrs = {
+ 'configuration_state': local['configuration_state'],
+ 'partner_cluster_reachable': local['partner_cluster_reachable'],
+ 'partner_cluster_name': local['cluster']['name']
+ }
+ return attrs
+
+ def create_metrocluster(self):
+ api = 'cluster/metrocluster'
+ options = {}
+ dr_pairs = []
+ for pair in self.parameters['dr_pairs']:
+ dr_pairs.append({'node': {'name': pair['node_name']},
+ 'partner': {'name': pair['partner_node_name']}})
+ partner_cluster = {'name': self.parameters['partner_cluster_name']}
+ data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster}
+ message, error = self.rest_api.post(api, data, options)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def apply(self):
+ current = self.get_metrocluster()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_metrocluster()
+ # Since there is no modify or delete, we will return no change
+ else:
+ self.module.fail_json(msg="Modify and Delete currently not support in API")
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppONTAPMetroCluster()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py
new file mode 100644
index 00000000..d8345c3d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+"""
+(c) 2020, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: na_ontap_metrocluster_dr_group
+short_description: NetApp ONTAP manage MetroCluster DR Group
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.11.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+requirements:
+ - ONTAP >= 9.8
+description:
+ - Create/Delete MetroCluster DR Group
+ - Create only supports MCC IP
+ - Delete supports both MCC IP and MCC FC
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Add or remove DR groups.
+ default: present
+ type: str
+ dr_pairs:
+ description: disaster recovery pairs
+ type: list
+ required: true
+ elements: dict
+ suboptions:
+ node_name:
+ description:
+ - the name of the main node
+ required: true
+ type: str
+ partner_node_name:
+ description:
+ - the name of the main partner node
+ required: true
+ type: str
+ partner_cluster_name:
+ description:
+ - The name of the partner cluster
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+-
+ name: Manage MetroCluster DR group
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: True
+ validate_certs: False
+ tasks:
+ - name: Create MetroCluster DR group
+ na_ontap_metrocluster_dr_group:
+ <<: *login
+ dr_pairs:
+ - partner_node_name: carchi_cluster3_01
+ node_name: carchi_cluster1_01
+ partner_cluster_name: carchi_cluster3
+ - name: Delete MetroCluster DR group
+ na_ontap_metrocluster_dr_group:
+ <<: *login
+ dr_pairs:
+ - partner_node_name: carchi_cluster3_01
+ node_name: carchi_cluster1_01
+ state: absent
+ partner_cluster_name: carchi_cluster3
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPMetroClusterDRGroup(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ dr_pairs=dict(required=True, type='list', elements='dict', options=dict(
+ node_name=dict(required=True, type='str'),
+ partner_node_name=dict(required=True, type='str')
+ )),
+ partner_cluster_name=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_metrocluster_dr_group',
+ version='9.8'))
+
+ def get_dr_group(self):
+ return_attrs = None
+ for pair in self.parameters['dr_pairs']:
+ api = 'cluster/metrocluster/dr-groups'
+ options = {'fields': '*',
+ 'dr_pairs.node.name': pair['node_name'],
+ 'dr_pairs.partner.name': pair['partner_node_name'],
+ 'partner_cluster.name': self.parameters['partner_cluster_name']}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'records' in message and message['num_records'] == 0:
+ continue
+ elif 'records' not in message or message['num_records'] != 1:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ record = message['records'][0]
+ return_attrs = {
+ 'partner_cluster_name': record['partner_cluster']['name'],
+ 'dr_pairs': [],
+ 'id': record['id']
+ }
+ for dr_pair in record['dr_pairs']:
+ return_attrs['dr_pairs'].append({'node_name': dr_pair['node']['name'], 'partner_node_name': dr_pair['partner']['name']})
+            # once we have found a matching DR group we don't need to loop anymore
+ break
+ return return_attrs
+
+ def get_dr_group_ids_from_nodes(self):
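+        """
+        Collect DR group ids for the configured nodes from cluster/metrocluster/nodes.
+        Used on delete when the dr-groups endpoint returns no record (e.g. MCC FC).
+        """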
+ delete_ids = []
+ for pair in self.parameters['dr_pairs']:
+ api = 'cluster/metrocluster/nodes'
+ options = {'fields': '*',
+ 'node.name': pair['node_name']}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'records' in message and message['num_records'] == 0:
+ continue
+ elif 'records' not in message or message['num_records'] != 1:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ record = message['records'][0]
+ if int(record['dr_group_id']) not in delete_ids:
+ delete_ids.append(int(record['dr_group_id']))
+ return delete_ids
+
+ def create_dr_group(self):
+ api = 'cluster/metrocluster/dr-groups'
+ dr_pairs = []
+ for pair in self.parameters['dr_pairs']:
+ dr_pairs.append({'node': {'name': pair['node_name']},
+ 'partner': {'name': pair['partner_node_name']}})
+ partner_cluster = {'name': self.parameters['partner_cluster_name']}
+ data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster}
+ message, error = self.rest_api.post(api, data)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def delete_dr_groups(self, dr_ids):
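+        """
+        Delete each DR group by id and wait for the corresponding job to complete.
+        """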
+ for dr_id in dr_ids:
+ api = 'cluster/metrocluster/dr-groups/' + str(dr_id)
+ message, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def apply(self):
+ current = self.get_dr_group()
+ delete_ids = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and current is None and self.parameters['state'] == 'absent':
+ # check if there is some FC group to delete
+ delete_ids = self.get_dr_group_ids_from_nodes()
+ if delete_ids:
+ cd_action = 'delete'
+ self.na_helper.changed = True
+ elif cd_action == 'delete':
+ delete_ids = [current['id']]
+ if cd_action and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_dr_group()
+ if cd_action == 'delete':
+ self.delete_dr_groups(delete_ids)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppONTAPMetroClusterDRGroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py
new file mode 100644
index 00000000..617cf741
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_motd
+author:
+ - Piotr Olczak (@dprts) <polczak@redhat.com>
+ - NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: Setup motd
+description:
+    - This module allows you to manipulate the MOTD for a vserver.
+    - It also allows manipulating the MOTD at the cluster level by using the cluster vserver (cserver).
+version_added: 2.7.0
+requirements:
+ - netapp_lib
+options:
+ state:
+ description:
+    - If C(state=present), sets the MOTD given in I(motd_message). If C(state=absent), removes it.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ motd_message:
+ description:
+ - MOTD Text message.
+ type: str
+ aliases:
+ - message
+ vserver:
+ description:
+    - The name of the SVM the MOTD should be set for.
+ required: true
+ type: str
+ show_cluster_motd:
+ description:
+ - Set to I(false) if Cluster-level Message of the Day should not be shown
+ type: bool
+ default: True
+
+'''
+
+EXAMPLES = '''
+
+- name: Set Cluster-Level MOTD
+ na_ontap_motd:
+ vserver: my_ontap_cluster
+ motd_message: "Cluster wide MOTD"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: present
+ https: true
+
+- name: Set MOTD for I(rhev_nfs_krb) SVM, do not show Cluster-Level MOTD
+ na_ontap_motd:
+ vserver: rhev_nfs_krb
+ motd_message: "Access to rhev_nfs_krb is also restricted"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: present
+ show_cluster_motd: False
+ https: true
+
+- name: Remove Cluster-Level MOTD
+ na_ontap_motd:
+ vserver: my_ontap_cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: absent
+ https: true
+
+'''
+
+RETURN = '''
+
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPMotd(object):
+
+ def __init__(self):
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ vserver=dict(required=True, type='str'),
+ motd_message=dict(default='', type='str', aliases=['message']),
+ show_cluster_motd=dict(default=True, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def motd_get_iter(self):
+ """
+ Compose NaElement object to query current motd
+ :return: NaElement object for vserver-motd-get-iter
+ """
+ motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('is-cluster-message-enabled', str(self.parameters['show_cluster_motd']))
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_get_iter.add_child_elem(query)
+ return motd_get_iter
+
+ def motd_get(self):
+ """
+ Get current motd
+ :return: Dictionary of current motd details if query successful, else None
+ """
+ motd_get_iter = self.motd_get_iter()
+ motd_result = dict()
+ try:
+ result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ motd_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'vserver-motd-info')
+ motd_result['motd_message'] = motd_info.get_child_content('message')
+ motd_result['motd_message'] = str(motd_result['motd_message']).rstrip()
+            motd_result['show_cluster_motd'] = motd_info.get_child_content('is-cluster-message-enabled') == 'true'
+ motd_result['vserver'] = motd_info.get_child_content('vserver')
+ return motd_result
+ return None
+
+ def modify_motd(self):
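+        """
+        Create or update the MOTD with vserver-motd-modify-iter; the query element
+        scopes the change to the target vserver.
+        """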
+ motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter')
+ motd_create.add_new_child('message', self.parameters['motd_message'])
+ motd_create.add_new_child(
+ 'is-cluster-message-enabled', 'true' if self.parameters['show_cluster_motd'] is True else 'false')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_create.add_child_elem(query)
+ try:
+ self.server.invoke_successfully(motd_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as err:
+ self.module.fail_json(msg="Error creating motd: %s" % (to_native(err)), exception=traceback.format_exc())
+ return motd_create
+
+ def apply(self):
+ """
+ Applies action from playbook
+ """
+ netapp_utils.ems_log_event("na_ontap_motd", self.server)
+ current = self.motd_get()
+ if self.parameters['state'] == 'absent':
+ # Just make sure it is empty
+ self.parameters['motd_message'] = ''
+ if current and current['motd_message'] == 'None':
+ current = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.modify_motd()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ motd_obj = NetAppONTAPMotd()
+ motd_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py
new file mode 100644
index 00000000..3e56f636
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify Name Service Switch
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_name_service_switch
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified ns-switch should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ database_type:
+ description:
+ - Name services switch database.
+ choices: ['hosts','group', 'passwd', 'netgroup', 'namemap']
+ required: true
+ type: str
+ sources:
+ description:
+ - Type of sources.
+    - Possible values include files, dns, ldap, nis.
+ type: list
+ elements: str
+
+short_description: "NetApp ONTAP Manage name service switch"
+'''
+
+EXAMPLES = """
+ - name: create name service database
+ na_ontap_name_service_switch:
+ state: present
+ database_type: namemap
+ sources: files,ldap
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+
+ - name: modify name service database sources
+ na_ontap_name_service_switch:
+ state: present
+ database_type: namemap
+ sources: files
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
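+
+  # A minimal deletion sketch, assuming the same connection variables as above.
+  - name: delete name service database
+    na_ontap_name_service_switch:
+      state: absent
+      database_type: namemap
+      vserver: "{{ Vserver name }}"
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"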
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNsswitch(object):
+ """
+    Class with name service switch methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ database_type=dict(required=True, type='str', choices=['hosts', 'group', 'passwd', 'netgroup', 'namemap']),
+ sources=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_name_service_switch(self):
+ """
+ get current name service switch config
+ :return: dict of current name service switch
+ """
+ nss_iter = netapp_utils.zapi.NaElement('nameservice-nsswitch-get-iter')
+ nss_info = netapp_utils.zapi.NaElement('namservice-nsswitch-config-info')
+ db_type = netapp_utils.zapi.NaElement('nameservice-database')
+ db_type.set_content(self.parameters['database_type'])
+ query = netapp_utils.zapi.NaElement('query')
+ nss_info.add_child_elem(db_type)
+ query.add_child_elem(nss_info)
+ nss_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(nss_iter, True)
+ return_value = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ nss_sources = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'namservice-nsswitch-config-info').get_child_by_name('nameservice-sources')
+ sources = [sources.get_content() for sources in nss_sources.get_children()]
+ return_value = {
+ 'sources': sources
+ }
+ return return_value
+
+ def create_name_service_switch(self):
+ """
+ create name service switch config
+ :return: None
+ """
+ nss_create = netapp_utils.zapi.NaElement('nameservice-nsswitch-create')
+ nss_create.add_new_child('nameservice-database', self.parameters['database_type'])
+ nss_sources = netapp_utils.zapi.NaElement('nameservice-sources')
+ nss_create.add_child_elem(nss_sources)
+ for source in self.parameters['sources']:
+ nss_sources.add_new_child('nss-source-type', source.strip())
+ try:
+ self.server.invoke_successfully(nss_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on creating name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_name_service_switch(self):
+ """
+ delete name service switch
+ :return: None
+ """
+ nss_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'nameservice-nsswitch-destroy', **{'nameservice-database': self.parameters['database_type']})
+ try:
+ self.server.invoke_successfully(nss_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on deleting name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_name_service_switch(self, modify):
+ """
+ modify name service switch
+ :param modify: dict of modify attributes
+ :return: None
+ """
+ nss_modify = netapp_utils.zapi.NaElement('nameservice-nsswitch-modify')
+ nss_modify.add_new_child('nameservice-database', self.parameters['database_type'])
+ nss_sources = netapp_utils.zapi.NaElement('nameservice-sources')
+ nss_modify.add_child_elem(nss_sources)
+ if 'sources' in modify:
+ for source in self.parameters['sources']:
+ nss_sources.add_new_child('nss-source-type', source.strip())
+ try:
+ self.server.invoke_successfully(nss_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on modifying name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_name_service_switch", self.server)
+ current = self.get_name_service_switch()
+ cd_action, modify = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_name_service_switch()
+ elif cd_action == 'delete':
+ self.delete_name_service_switch()
+ elif modify:
+ self.modify_name_service_switch(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    '''Apply operations from playbook'''
+ nss = NetAppONTAPNsswitch()
+ nss.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py
new file mode 100644
index 00000000..528b168e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+""" this is ndmp module
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: na_ontap_ndmp
+short_description: NetApp ONTAP NDMP services configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Modify NDMP Services.
+
+options:
+
+ vserver:
+ description:
+ - Name of the vserver.
+ required: true
+ type: str
+
+ abort_on_disk_error:
+ description:
+ - Enable abort on disk error.
+ type: bool
+
+ authtype:
+ description:
+ - Authentication type.
+ type: list
+ elements: str
+
+ backup_log_enable:
+ description:
+ - Enable backup log.
+ type: bool
+
+ data_port_range:
+ description:
+ - Data port range. Modification not supported for data Vservers.
+ type: str
+
+ debug_enable:
+ description:
+ - Enable debug.
+ type: bool
+
+ debug_filter:
+ description:
+ - Debug filter.
+ type: str
+
+ dump_detailed_stats:
+ description:
+ - Enable logging of VM stats for dump.
+ type: bool
+
+ dump_logical_find:
+ description:
+ - Enable logical find for dump.
+ type: str
+
+ enable:
+ description:
+ - Enable NDMP on vserver.
+ type: bool
+
+ fh_dir_retry_interval:
+ description:
+ - FH throttle value for dir.
+ type: int
+
+ fh_node_retry_interval:
+ description:
+ - FH throttle value for node.
+ type: int
+
+ ignore_ctime_enabled:
+ description:
+ - Ignore ctime.
+ type: bool
+
+ is_secure_control_connection_enabled:
+ description:
+ - Is secure control connection enabled.
+ type: bool
+
+ offset_map_enable:
+ description:
+ - Enable offset map.
+ type: bool
+
+ per_qtree_exclude_enable:
+ description:
+ - Enable per qtree exclusion.
+ type: bool
+
+ preferred_interface_role:
+ description:
+ - Preferred interface role.
+ type: list
+ elements: str
+
+ restore_vm_cache_size:
+ description:
+ - Restore VM file cache size. Value range [4-1024]
+ type: int
+
+ secondary_debug_filter:
+ description:
+ - Secondary debug filter.
+ type: str
+
+ tcpnodelay:
+ description:
+ - Enable TCP nodelay.
+ type: bool
+
+ tcpwinsize:
+ description:
+ - TCP window size.
+ type: int
+'''
+
+EXAMPLES = '''
+ - name: modify ndmp
+ na_ontap_ndmp:
+ vserver: ansible
+ hostname: "{{ hostname }}"
+ abort_on_disk_error: true
+ authtype: plaintext,challenge
+ backup_log_enable: true
+ data_port_range: 8000-9000
+ debug_enable: true
+ debug_filter: filter
+ dump_detailed_stats: true
+ dump_logical_find: default
+ enable: true
+ fh_dir_retry_interval: 100
+ fh_node_retry_interval: 100
+ ignore_ctime_enabled: true
+ is_secure_control_connection_enabled: true
+ offset_map_enable: true
+ per_qtree_exclude_enable: true
+ preferred_interface_role: node_mgmt,intercluster
+ restore_vm_cache_size: 1000
+ secondary_debug_filter: filter
+ tcpnodelay: true
+ tcpwinsize: 10000
+ username: user
+ password: pass
+ https: False
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNdmp(object):
+ '''
+    modify vserver NDMP configuration
+ '''
+ def __init__(self):
+ self.use_rest = False
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.modifiable_options = dict(
+ abort_on_disk_error=dict(required=False, type='bool'),
+ authtype=dict(required=False, type='list', elements='str'),
+ backup_log_enable=dict(required=False, type='bool'),
+ data_port_range=dict(required=False, type='str'),
+ debug_enable=dict(required=False, type='bool'),
+ debug_filter=dict(required=False, type='str'),
+ dump_detailed_stats=dict(required=False, type='bool'),
+ dump_logical_find=dict(required=False, type='str'),
+ enable=dict(required=False, type='bool'),
+ fh_dir_retry_interval=dict(required=False, type='int'),
+ fh_node_retry_interval=dict(required=False, type='int'),
+ ignore_ctime_enabled=dict(required=False, type='bool'),
+ is_secure_control_connection_enabled=dict(required=False, type='bool'),
+ offset_map_enable=dict(required=False, type='bool'),
+ per_qtree_exclude_enable=dict(required=False, type='bool'),
+ preferred_interface_role=dict(required=False, type='list', elements='str'),
+ restore_vm_cache_size=dict(required=False, type='int'),
+ secondary_debug_filter=dict(required=False, type='str'),
+ tcpnodelay=dict(required=False, type='bool'),
+ tcpwinsize=dict(required=False, type='int')
+ )
+ self.argument_spec.update(dict(
+ vserver=dict(required=True, type='str')
+ ))
+
+ self.argument_spec.update(self.modifiable_options)
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # API should be used for ONTAP 9.6 or higher, ZAPI for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ unsupported_rest_properties = ['abort_on_disk_error', 'backup_log_enable', 'data_port_range',
+ 'debug_enable', 'debug_filter', 'dump_detailed_stats',
+ 'dump_logical_find', 'fh_dir_retry_interval', 'fh_node_retry_interval',
+ 'ignore_ctime_enabled', 'is_secure_control_connection_enabled',
+ 'offset_map_enable', 'per_qtree_exclude_enable', 'preferred_interface_role',
+ 'restore_vm_cache_size', 'secondary_debug_filter', 'tcpnodelay', 'tcpwinsize']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_ndmp_svm_uuid(self):
+
+ """
+ Get a svm's UUID
+ :return: uuid of the node
+ """
+ params = {'svm.name': self.parameters['vserver']}
+ api = "protocols/ndmp/svms"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ if 'records' in message and len(message['records']) == 0:
+            self.module.fail_json(msg='Error fetching uuid for vserver %s' % self.parameters['vserver'])
+ if len(message.keys()) == 0:
+ error = "No information collected from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ elif 'records' not in message:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ return message['records'][0]['svm']['uuid']
+
+ def ndmp_get_iter(self, uuid=None):
+ """
+ get current vserver ndmp attributes.
+ :return: a dict of ndmp attributes.
+ """
+ if self.use_rest:
+ data = dict()
+ params = {'fields': 'authentication_types,enabled'}
+ api = '/protocols/ndmp/svms/' + uuid
+            message, error = self.rest_api.get(api, params)
+            if error:
+                self.module.fail_json(msg=error)
+            data['enable'] = message['enabled']
+            data['authtype'] = message['authentication_types']
+            return data
+ else:
+ ndmp_get = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ ndmp_info = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-info')
+ ndmp_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(ndmp_info)
+ ndmp_get.add_child_elem(query)
+ ndmp_details = dict()
+ try:
+ result = self.server.invoke_successfully(ndmp_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching ndmp from %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ ndmp_attributes = result.get_child_by_name('attributes-list').get_child_by_name('ndmp-vserver-attributes-info')
+ self.get_ndmp_details(ndmp_details, ndmp_attributes)
+ return ndmp_details
+
+ def get_ndmp_details(self, ndmp_details, ndmp_attributes):
+ """
+ :param ndmp_details: a dict of current ndmp.
+ :param ndmp_attributes: ndmp returned from api call in xml format.
+ :return: None
+ """
+ for option in self.modifiable_options:
+ option_type = self.modifiable_options[option]['type']
+ if option_type == 'bool':
+ ndmp_details[option] = self.str_to_bool(ndmp_attributes.get_child_content(self.attribute_to_name(option)))
+ elif option_type == 'int':
+ ndmp_details[option] = int(ndmp_attributes.get_child_content(self.attribute_to_name(option)))
+ elif option_type == 'list':
+ child_list = ndmp_attributes.get_child_by_name(self.attribute_to_name(option))
+ values = [child.get_content() for child in child_list.get_children()]
+ ndmp_details[option] = values
+ else:
+ ndmp_details[option] = ndmp_attributes.get_child_content(self.attribute_to_name(option))
+
+ def modify_ndmp(self, modify):
+ """
+ :param modify: A list of attributes to modify
+ :return: None
+ """
+ if self.use_rest:
+ ndmp = dict()
+ uuid = self.get_ndmp_svm_uuid()
+ if self.parameters.get('enable'):
+ ndmp['enabled'] = self.parameters['enable']
+ if self.parameters.get('authtype'):
+ ndmp['authentication_types'] = self.parameters['authtype']
+ api = "protocols/ndmp/svms/" + uuid
+ dummy, error = self.rest_api.patch(api, ndmp)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+
+ ndmp_modify = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-modify')
+ for attribute in modify:
+ if attribute == 'authtype':
+ authtypes = netapp_utils.zapi.NaElement('authtype')
+ types = self.parameters['authtype']
+ for authtype in types:
+ authtypes.add_new_child('ndmpd-authtypes', authtype)
+ ndmp_modify.add_child_elem(authtypes)
+ elif attribute == 'preferred_interface_role':
+ preferred_interface_roles = netapp_utils.zapi.NaElement('preferred-interface-role')
+ roles = self.parameters['preferred_interface_role']
+ for role in roles:
+ preferred_interface_roles.add_new_child('netport-role', role)
+ ndmp_modify.add_child_elem(preferred_interface_roles)
+ else:
+ ndmp_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
+ try:
+ self.server.invoke_successfully(ndmp_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error modifying ndmp on %s: %s'
+ % (self.parameters['vserver'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
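+        # translate an option name such as 'backup_log_enable' into the ZAPI element name 'backup-log-enable'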
+ return str.replace(attribute, '_', '-')
+
+ @staticmethod
+ def str_to_bool(value):
+ return value == 'true'
+
+ def apply(self):
+ """Call modify operations."""
+ uuid = None
+ if not self.use_rest:
+ self.asup_log_for_cserver("na_ontap_ndmp")
+ if self.use_rest:
+            # we only have the svm name, we need to get the uuid for the svm
+ uuid = self.get_ndmp_svm_uuid()
+ current = self.ndmp_get_iter(uuid=uuid)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if modify:
+ self.modify_ndmp(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ obj = NetAppONTAPNdmp()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py
new file mode 100644
index 00000000..675fa44e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_net_ifgrp
+short_description: NetApp Ontap modify network interface group
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, modify the ports of, or destroy a network interface group.
+options:
+ state:
+ description:
+ - Whether the specified network interface group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ distribution_function:
+ description:
+ - Specifies the traffic distribution function for the ifgrp.
+ choices: ['mac', 'ip', 'sequential', 'port']
+ type: str
+
+ name:
+ description:
+ - Specifies the interface group name.
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Specifies the link policy for the ifgrp.
+ type: str
+
+ node:
+ description:
+ - Specifies the name of node.
+ required: true
+ type: str
+
+ ports:
+ aliases:
+ - port
+ description:
+ - List of expected ports to be present in the interface group.
+ - If a port is present in this list, but not on the target, it will be added.
+ - If a port is not in the list, but present on the target, it will be removed.
+ - Make sure the list contains all ports you want to see on the target.
+ version_added: 2.8.0
+ type: list
+ elements: str
+"""
+
+EXAMPLES = """
+ - name: create ifgrp
+ na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ name: a0c
+ ports: [e0a]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: modify ports in an ifgrp
+ na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ name: a0c
+ port: [e0a, e0c]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: delete ifgrp
+ na_ontap_net_ifgrp:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: a0c
+ node: "{{ Vsim node name }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapIfGrp(object):
+ """
+    Creates, modifies and destroys an ifgrp
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap IfGrp class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ distribution_function=dict(required=False, type='str', choices=['mac', 'ip', 'sequential', 'port']),
+ name=dict(required=True, type='str'),
+ mode=dict(required=False, type='str'),
+ node=dict(required=True, type='str'),
+ ports=dict(required=False, type='list', elements='str', aliases=["port"]),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['distribution_function', 'mode'])
+ ],
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def get_if_grp(self):
+ """
+ Return details about the if_group
+ :param:
+ name : Name of the if_group
+
+ :return: Details about the if_group. None if not found.
+ :rtype: dict
+ """
+ if_group_iter = netapp_utils.zapi.NaElement('net-port-get-iter')
+ if_group_info = netapp_utils.zapi.NaElement('net-port-info')
+ if_group_info.add_new_child('port', self.parameters['name'])
+ if_group_info.add_new_child('port-type', 'if_group')
+ if_group_info.add_new_child('node', self.parameters['node'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(if_group_info)
+ if_group_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(if_group_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting if_group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return_value = None
+
+ if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
+ if_group_attributes = result['attributes-list']['net-port-info']
+ return_value = {
+ 'name': if_group_attributes['port'],
+ 'distribution_function': if_group_attributes['ifgrp-distribution-function'],
+ 'mode': if_group_attributes['ifgrp-mode'],
+ 'node': if_group_attributes['node'],
+ }
+
+ return return_value
+
+ def get_if_grp_ports(self):
+ """
+ Return ports of the if_group
+ :param:
+ name : Name of the if_group
+ :return: Ports of the if_group. None if not found.
+ :rtype: dict
+ """
+ if_group_iter = netapp_utils.zapi.NaElement('net-port-ifgrp-get')
+ if_group_iter.add_new_child('ifgrp-name', self.parameters['name'])
+ if_group_iter.add_new_child('node', self.parameters['node'])
+ try:
+ result = self.server.invoke_successfully(if_group_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting if_group ports %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ port_list = []
+ if result.get_child_by_name('attributes'):
+ if_group_attributes = result['attributes']['net-ifgrp-info']
+ if if_group_attributes.get_child_by_name('ports'):
+ ports = if_group_attributes.get_child_by_name('ports').get_children()
+ for each in ports:
+ port_list.append(each.get_content())
+ return {'ports': port_list}
+
+ def create_if_grp(self):
+ """
+ Creates a new ifgrp
+ """
+ route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-create")
+ route_obj.add_new_child("distribution-function", self.parameters['distribution_function'])
+ route_obj.add_new_child("ifgrp-name", self.parameters['name'])
+ route_obj.add_new_child("mode", self.parameters['mode'])
+ route_obj.add_new_child("node", self.parameters['node'])
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating if_group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if self.parameters.get('ports') is not None:
+ for port in self.parameters.get('ports'):
+ self.add_port_to_if_grp(port)
+
+ def delete_if_grp(self):
+ """
+ Deletes a ifgrp
+ """
+ route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-destroy")
+ route_obj.add_new_child("ifgrp-name", self.parameters['name'])
+ route_obj.add_new_child("node", self.parameters['node'])
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting if_group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def add_port_to_if_grp(self, port):
+ """
+        adds a port to an ifgrp
+ """
+ route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-add-port")
+ route_obj.add_new_child("ifgrp-name", self.parameters['name'])
+ route_obj.add_new_child("port", port)
+ route_obj.add_new_child("node", self.parameters['node'])
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port %s to if_group %s: %s' %
+ (port, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_ports(self, current_ports):
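+        """
+        Reconcile ifgrp port membership: add ports missing on the target and
+        remove ports that are not in the desired list.
+        """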
+ add_ports = set(self.parameters['ports']) - set(current_ports)
+ remove_ports = set(current_ports) - set(self.parameters['ports'])
+ for port in add_ports:
+ self.add_port_to_if_grp(port)
+ for port in remove_ports:
+ self.remove_port_to_if_grp(port)
+
+ def remove_port_to_if_grp(self, port):
+ """
+        removes a port from an ifgrp
+ """
+ route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-remove-port")
+ route_obj.add_new_child("ifgrp-name", self.parameters['name'])
+ route_obj.add_new_child("port", port)
+ route_obj.add_new_child("node", self.parameters['node'])
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error removing port %s from if_group %s: %s' %
+ (port, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_net_ifgrp", cserver)
+
+ def apply(self):
+ self.autosupport_log()
+ current, modify = self.get_if_grp(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ current_ports = self.get_if_grp_ports()
+ modify = self.na_helper.get_modified_attributes(current_ports, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_if_grp()
+ elif cd_action == 'delete':
+ self.delete_if_grp()
+ elif modify:
+ self.modify_ports(current_ports['ports'])
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap Net Route object and runs the correct play task
+ """
+ obj = NetAppOntapIfGrp()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py
new file mode 100644
index 00000000..27b10174
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_net_port
+short_description: NetApp ONTAP network ports.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify an ONTAP network port.
+options:
+ state:
+ description:
+ - Whether the specified net port should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+ node:
+ description:
+ - Specifies the name of node.
+ required: true
+ type: str
+ ports:
+ aliases:
+ - port
+ description:
+ - Specifies the name of port(s).
+ required: true
+ type: list
+ elements: str
+ mtu:
+ description:
+ - Specifies the maximum transmission unit (MTU) reported by the port.
+ type: str
+ autonegotiate_admin:
+ description:
+ - Enables or disables Ethernet auto-negotiation of speed,
+ duplex and flow control.
+ type: str
+ duplex_admin:
+ description:
+ - Specifies the user preferred duplex setting of the port.
+    - Valid values are auto, half, full.
+ type: str
+ speed_admin:
+ description:
+ - Specifies the user preferred speed setting of the port.
+ type: str
+ flowcontrol_admin:
+ description:
+ - Specifies the user preferred flow control setting of the port.
+ type: str
+ ipspace:
+ description:
+ - Specifies the port's associated IPspace name.
+ - The 'Cluster' ipspace is reserved for cluster ports.
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Modify Net Port
+ na_ontap_net_port:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ node: "{{ node_name }}"
+ ports: e0d,e0c
+ autonegotiate_admin: true
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNetPort(object):
+ """
+ Modify a Net port
+ """
+
+ def __init__(self):
+ """
+ Initialize the Ontap Net Port Class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ node=dict(required=True, type="str"),
+ ports=dict(required=True, type='list', elements='str', aliases=['port']),
+ mtu=dict(required=False, type="str", default=None),
+ autonegotiate_admin=dict(required=False, type="str", default=None),
+ duplex_admin=dict(required=False, type="str", default=None),
+ speed_admin=dict(required=False, type="str", default=None),
+ flowcontrol_admin=dict(required=False, type="str", default=None),
+ ipspace=dict(required=False, type="str", default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def set_playbook_zapi_key_map(self):
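+        """
+        Map module option names to the ZAPI element names used by
+        net-port-get-iter and net-port-modify.
+        """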
+ self.na_helper.zapi_string_keys = {
+ 'mtu': 'mtu',
+ 'autonegotiate_admin': 'is-administrative-auto-negotiate',
+ 'duplex_admin': 'administrative-duplex',
+ 'speed_admin': 'administrative-speed',
+ 'flowcontrol_admin': 'administrative-flowcontrol',
+ 'ipspace': 'ipspace'
+ }
+
+ def get_net_port(self, port):
+ """
+ Return details about the net port
+ :param: port: Name of the port
+ :return: Dictionary with current state of the port. None if not found.
+ :rtype: dict
+ """
+ net_port_get = netapp_utils.zapi.NaElement('net-port-get-iter')
+ attributes = {
+ 'query': {
+ 'net-port-info': {
+ 'node': self.parameters['node'],
+ 'port': port
+ }
+ }
+ }
+ net_port_get.translate_struct(attributes)
+
+ try:
+ result = self.server.invoke_successfully(net_port_get, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ port_info = result['attributes-list']['net-port-info']
+ port_details = dict()
+ else:
+ return None
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting net ports for %s: %s' % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ port_details[item_key] = port_info.get_child_content(zapi_key)
+ return port_details
+
+ def modify_net_port(self, port, modify):
+ """
+ Modify a port
+
+ :param port: Name of the port
+ :param modify: dict with attributes to be modified
+ :return: None
+ """
+ port_modify = netapp_utils.zapi.NaElement('net-port-modify')
+ port_attributes = {'node': self.parameters['node'],
+ 'port': port}
+ for key in modify:
+ if key in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(key)
+ port_attributes[zapi_key] = modify[key]
+ port_modify.translate_struct(port_attributes)
+ try:
+ self.server.invoke_successfully(port_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying net ports for %s: %s' % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ """
+ AutoSupport log for na_ontap_net_port
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_net_port", cserver)
+
+ def apply(self):
+ """
+ Run Module based on play book
+ """
+
+ self.autosupport_log()
+ # Run the task for all ports in the list of 'ports'
+ for port in self.parameters['ports']:
+ current = self.get_net_port(port)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if modify:
+ self.modify_net_port(port, modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create the NetApp Ontap Net Port Object and modify it
+ """
+ obj = NetAppOntapNetPort()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py
new file mode 100644
index 00000000..58eed34e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_net_routes
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_net_routes
+short_description: NetApp ONTAP network routes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify ONTAP network routes.
+options:
+ state:
+ description:
+ - Whether you want to create or delete a network route.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ vserver:
+ description:
+ - The name of the vserver.
+ required: true
+ type: str
+ destination:
+ description:
+ - Specify the route destination.
+ - Example 10.7.125.5/20, fd20:13::/64.
+ required: true
+ type: str
+ gateway:
+ description:
+ - Specify the route gateway.
+ - Example 10.7.125.1, fd20:13::1.
+ required: true
+ type: str
+ metric:
+ description:
+ - Specify the route metric.
+ - If this field is not provided the default will be set to 20.
+ type: int
+ from_destination:
+ description:
+ - Specify the route destination that should be changed.
+ - new_destination was removed to fix idempotency issues. To rename destination the original goes to from_destination and the new goes to destination.
+ version_added: 2.8.0
+ type: str
+ from_gateway:
+ description:
+ - Specify the route gateway that should be changed.
+ version_added: 2.8.0
+ type: str
+ from_metric:
+ description:
+ - Specify the route metric that should be changed.
+ version_added: 2.8.0
+ type: int
+'''
+
+EXAMPLES = """
+ - name: create route
+ na_ontap_net_routes:
+ state: present
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ destination: 10.7.125.5/20
+ gateway: 10.7.125.1
+ metric: 30
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNetRoutes(object):
+ """
+    Creates, modifies and destroys a net route
+ """
+
+ def __init__(self):
+ """
+ Initialize the Ontap Net Route class
+ """
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ destination=dict(required=True, type='str'),
+ gateway=dict(required=True, type='str'),
+ metric=dict(required=False, type='int'),
+ from_destination=dict(required=False, type='str', default=None),
+ from_gateway=dict(required=False, type='str', default=None),
+ from_metric=dict(required=False, type='int', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ # some attributes are not supported in earlier REST implementation
+ unsupported_rest_properties = ['metric', 'from_metric']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+
+ if error is not None:
+ self.module.fail_json(msg=error)
+
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def create_net_route(self, current_metric=None):
+ """
+ Creates a new Route
+ """
+ if self.use_rest:
+ api = "network/ip/routes"
+ params = {'gateway': self.parameters['gateway'],
+ 'svm': self.parameters['vserver']}
+ if self.parameters.get('destination') is not None:
+ dest = self.parameters['destination'].split('/')
+ params['destination'] = {'address': dest[0], 'netmask': dest[1]}
+ __, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ route_obj = netapp_utils.zapi.NaElement('net-routes-create')
+ route_obj.add_new_child("destination", self.parameters['destination'])
+ route_obj.add_new_child("gateway", self.parameters['gateway'])
+ if current_metric is None and self.parameters.get('metric') is not None:
+ metric = self.parameters['metric']
+ else:
+ metric = current_metric
+            # Metric can be None; we cannot set metric to None, so only add it when a value is present
+ if metric is not None:
+ route_obj.add_new_child("metric", str(metric))
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating net route: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_net_route(self, params):
+ """
+ Deletes a given Route
+ """
+ if self.use_rest:
+ uuid = params['uuid']
+ api = "network/ip/routes/" + uuid
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ route_obj = netapp_utils.zapi.NaElement('net-routes-destroy')
+ if params is None:
+ params = self.parameters
+ route_obj.add_new_child("destination", params['destination'])
+ route_obj.add_new_child("gateway", params['gateway'])
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting net route: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_net_route(self, current, desired):
+ """
+ Modify a net route
+ Since we cannot modify a route, we are deleting the existing route, and creating a new one.
+ """
+ if self.use_rest:
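+            # REST has no in-place modify for routes either: work out the new destination and
+            # gateway below, then delete the matching route and re-create it.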
+ if desired.get('destination') is not None:
+ dest = desired['destination'].split('/')
+ if dest[0] != current['destination']['address'] or dest[1] != current['destination']['netmask']:
+ self.na_helper.changed = True
+ self.parameters['destination'] = desired['destination']
+ else:
+ self.parameters['destination'] = '%s/%s' % (current['destination']['address'],
+ current['destination']['netmask'])
+ if desired.get('gateway') is not None:
+ if desired['gateway'] != current['gateway']:
+ self.na_helper.changed = True
+ self.parameters['gateway'] = desired['gateway']
+ else:
+ self.parameters['gateway'] = current['gateway']
+ if not self.na_helper.changed or self.module.check_mode:
+ return
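+            # fetch the existing route record, delete it (delete_net_route uses its uuid), then re-create it with the new values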
+ params = {'destination': '%s/%s' % (current['destination']['address'], current['destination']['netmask']),
+ 'gateway': current['gateway']}
+ target = self.get_net_route(params)
+ self.delete_net_route(target)
+ self.create_net_route()
+ return
+
+ else:
+ # return if there is nothing to change
+ for key, val in desired.items():
+ if val != current[key]:
+ self.na_helper.changed = True
+ break
+ if not self.na_helper.changed or self.module.check_mode:
+ return
+ # delete and re-create with new params
+ self.delete_net_route(current)
+ route_obj = netapp_utils.zapi.NaElement('net-routes-create')
+ for attribute in ['metric', 'destination', 'gateway']:
+ if desired.get(attribute) is not None:
+ value = desired[attribute]
+ else:
+ value = current[attribute]
+ route_obj.add_new_child(attribute, str(value))
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ # restore the old route, create the route with the existing metric
+ self.create_net_route(current['metric'])
+ # return if desired route already exists
+ if to_native(error.code) == '13001':
+ return
+ # Invalid value specified for any of the attributes
+ self.module.fail_json(msg='Error modifying net route: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_net_route(self, params=None):
+ """
+        Checks whether a route exists
+        :return: route details as a dict if the route exists, None otherwise
+ """
+ if params is not None:
+ # we need either destination or gateway to fetch desired route
+ if params.get('destination') is None and params.get('gateway') is None:
+ return None
+ if self.use_rest:
+ api = "network/ip/routes"
+ data = {'fields': 'destination,gateway,svm'}
+ message, error = self.rest_api.get(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ return None
+ elif 'records' in message and len(message['records']) == 0:
+ return None
+ elif 'records' not in message:
+ error = "Unexpected response in get_net_route from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ if params is None:
+ params = self.parameters
+ else:
+ if params.get('destination') is None:
+ params['destination'] = self.parameters['destination']
+ if params.get('gateway') is None:
+ params['gateway'] = self.parameters['gateway']
+ params['vserver'] = self.parameters['vserver']
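+            # scan the returned records for a route matching the gateway, destination address and owning SVM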
+ for record in message['records']:
+ if record['gateway'] == params['gateway'] and \
+ record['destination']['address'] == params['destination'].split('/')[0] and \
+ record.get('svm') and record['svm']['name'] == params['vserver']:
+ return record
+ return None
+ else:
+ current = None
+ route_obj = netapp_utils.zapi.NaElement('net-routes-get')
+ for attr in ['destination', 'gateway']:
+ if params and params.get(attr) is not None:
+ value = params[attr]
+ else:
+ value = self.parameters[attr]
+ route_obj.add_new_child(attr, value)
+ try:
+ result = self.server.invoke_successfully(route_obj, True)
+ if result.get_child_by_name('attributes') is not None:
+ route_info = result.get_child_by_name('attributes').get_child_by_name('net-vs-routes-info')
+ current = {
+ 'destination': route_info.get_child_content('destination'),
+ 'gateway': route_info.get_child_content('gateway'),
+ 'metric': int(route_info.get_child_content('metric'))
+ }
+
+ except netapp_utils.zapi.NaApiError as error:
+            # Error 15661 denotes that the route does not exist.
+ if to_native(error.code) == "15661":
+ return None
+ self.module.fail_json(msg='Error fetching net route: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+ return current
+
+ @staticmethod
+ def is_modify_action(current, desired):
+ """
+ Get desired action to be applied for net routes
+ Destination and gateway are unique params for a route and cannot be duplicated
+ So if a route with desired destination or gateway exists already, we don't try to modify
+ :param current: current details
+ :param desired: desired details
+ :return: create / delete / modify / None
+ """
+ if current is None and desired is None:
+ # this is invalid
+ # cannot modify a non existent resource
+ return None
+ if current is None and desired is not None:
+ # idempotency or duplication
+ # we need not create
+ return False
+ if current is not None and desired is not None:
+ # we can't modify an ambiguous route (idempotency/duplication)
+ return False
+ return True
+
+ def get_params_to_be_modified(self, current):
+ """
+ Get parameters and values that need to be modified
+ :param current: current details
+ :return: dict(), None
+ """
+ if current is None:
+ return None
+ desired = dict()
+ if self.parameters.get('new_destination') is not None and \
+ self.parameters['new_destination'] != current['destination']:
+ desired['destination'] = self.parameters['new_destination']
+ if self.parameters.get('new_gateway') is not None and \
+ self.parameters['new_gateway'] != current['gateway']:
+ desired['gateway'] = self.parameters['new_gateway']
+ if self.parameters.get('new_metric') is not None and \
+ self.parameters['new_metric'] != current['metric']:
+ desired['metric'] = self.parameters['new_metric']
+ return desired
+
+ def apply(self):
+ """
+        Run the module based on the playbook
+ """
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_net_routes", self.server)
+ current = self.get_net_route()
+ modify, cd_action = None, None
+ if self.use_rest:
+ modify_params = {'gateway': self.parameters.get('from_gateway'),
+ 'destination': self.parameters.get('from_destination')}
+ if any(modify_params.values()):
+                # the destination and gateway combination is unique and acts as an id, so modifying the destination
+                # or gateway is treated as a rename action.
+ old_params = self.get_net_route(modify_params)
+ modify = self.na_helper.is_rename_action(old_params, current)
+ if modify is None:
+ self.module.fail_json(msg="Error modifying: route %s does not exist" % self.parameters['from_destination'])
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ else:
+ modify_params = {'destination': self.parameters.get('from_destination'),
+ 'gateway': self.parameters.get('from_gateway'),
+ 'metric': self.parameters.get('from_metric')}
+ # if any from_* param is present in playbook, check for modify action
+ if any(modify_params.values()):
+                # the destination and gateway combination is unique and acts as an id, so modifying the destination
+                # or gateway is treated as a rename action. metric is an attribute of the route, so changing it is
+                # treated as a modify.
+ if modify_params.get('metric') is not None:
+ modify = True
+ old_params = current
+ else:
+ # get parameters that are eligible for modify
+ old_params = self.get_net_route(modify_params)
+ modify = self.na_helper.is_rename_action(old_params, current)
+ if modify is None:
+ self.module.fail_json(msg="Error modifying: route %s does not exist" % self.parameters['from_destination'])
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
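+        # cd_action and modify are mutually exclusive: modify is only set when a from_* parameter is supplied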
+ if cd_action == 'create':
+ if not self.module.check_mode:
+ self.create_net_route()
+ elif cd_action == 'delete':
+ if not self.module.check_mode:
+ self.delete_net_route(current)
+ elif modify:
+ desired = {}
+ for key, value in old_params.items():
+ desired[key] = value
+ for key, value in modify_params.items():
+ if value is not None:
+ desired[key] = self.parameters.get(key)
+ self.modify_net_route(old_params, desired)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP Net Routes object and runs the correct play task
+ """
+ obj = NetAppOntapNetRoutes()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py
new file mode 100644
index 00000000..bd52ef34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_net_subnet
+short_description: NetApp ONTAP Create, delete, modify network subnets.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: Storage Engineering (@Albinpopote) <ansible@black-perl.fr>
+description:
+- Create, modify, destroy the network subnet
+options:
+ state:
+ description:
+    - Whether the specified network subnet should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ broadcast_domain:
+ description:
+ - Specify the required broadcast_domain name for the subnet.
+    - A broadcast domain cannot be modified after the subnet has been created.
+ type: str
+
+ name:
+ description:
+ - Specify the subnet name.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the subnet to be renamed
+ type: str
+
+ gateway:
+ description:
+ - Specify the gateway for the default route of the subnet.
+ type: str
+
+ ipspace:
+ description:
+ - Specify the ipspace for the subnet.
+ - The default value for this parameter is the default IPspace, named 'Default'.
+ type: str
+
+ ip_ranges:
+ description:
+ - Specify the list of IP address ranges associated with the subnet.
+ type: list
+ elements: str
+
+ subnet:
+ description:
+ - Specify the subnet (ip and mask).
+ type: str
+"""
+
+EXAMPLES = """
+ - name: create subnet
+ na_ontap_net_subnet:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ subnet: 10.10.10.0/24
+ name: subnet-adm
+ ip_ranges: [ '10.10.10.30-10.10.10.40', '10.10.10.51' ]
+ gateway: 10.10.10.254
+ ipspace: Default
+ broadcast_domain: Default
+ - name: delete subnet
+ na_ontap_net_subnet:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: subnet-adm
+ ipspace: Default
+ - name: rename subnet
+ na_ontap_net_subnet:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: subnet-adm-new
+ from_name: subnet-adm
+ ipspace: Default
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSubnet(object):
+ """
+    Creates, modifies and destroys a subnet
+ """
+ def __init__(self):
+ """
+ Initialize the ONTAP Subnet class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ broadcast_domain=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ ip_ranges=dict(required=False, type='list', elements='str'),
+ ipspace=dict(required=False, type='str'),
+ subnet=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def get_subnet(self, name=None):
+ """
+ Return details about the subnet
+ :param:
+ name : Name of the subnet
+ :return: Details about the subnet. None if not found.
+ :rtype: dict
+ """
+ if name is None:
+ name = self.parameters.get('name')
+
+ subnet_iter = netapp_utils.zapi.NaElement('net-subnet-get-iter')
+ subnet_info = netapp_utils.zapi.NaElement('net-subnet-info')
+ subnet_info.add_new_child('subnet-name', name)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(subnet_info)
+
+ subnet_iter.add_child_elem(query)
+
+ result = self.server.invoke_successfully(subnet_iter, True)
+ return_value = None
+ # check if query returns the expected subnet
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+
+ subnet_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-subnet-info')
+ broadcast_domain = subnet_attributes.get_child_content('broadcast-domain')
+ gateway = subnet_attributes.get_child_content('gateway')
+ ipspace = subnet_attributes.get_child_content('ipspace')
+ subnet = subnet_attributes.get_child_content('subnet')
+ name = subnet_attributes.get_child_content('subnet-name')
+
+ ip_ranges = []
+ if subnet_attributes.get_child_by_name('ip-ranges'):
+ range_obj = subnet_attributes.get_child_by_name('ip-ranges').get_children()
+ for elem in range_obj:
+ ip_ranges.append(elem.get_content())
+
+ return_value = {
+ 'name': name,
+ 'broadcast_domain': broadcast_domain,
+ 'gateway': gateway,
+ 'ip_ranges': ip_ranges,
+ 'ipspace': ipspace,
+ 'subnet': subnet
+ }
+
+ return return_value
+
+ def create_subnet(self):
+ """
+ Creates a new subnet
+ """
+ options = {'subnet-name': self.parameters.get('name'),
+ 'broadcast-domain': self.parameters.get('broadcast_domain'),
+ 'subnet': self.parameters.get('subnet')}
+ subnet_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-subnet-create', **options)
+
+ if self.parameters.get('gateway'):
+ subnet_create.add_new_child('gateway', self.parameters.get('gateway'))
+ if self.parameters.get('ip_ranges'):
+ subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
+ subnet_create.add_child_elem(subnet_ips)
+ for ip_range in self.parameters.get('ip_ranges'):
+ subnet_ips.add_new_child('ip-range', ip_range)
+ if self.parameters.get('ipspace'):
+ subnet_create.add_new_child('ipspace', self.parameters.get('ipspace'))
+
+ try:
+ self.server.invoke_successfully(subnet_create, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_subnet(self):
+ """
+ Deletes a subnet
+ """
+ subnet_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-subnet-destroy', **{'subnet-name': self.parameters.get('name')})
+
+ try:
+ self.server.invoke_successfully(subnet_delete, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_subnet(self):
+ """
+ Modifies a subnet
+ """
+ options = {'subnet-name': self.parameters.get('name')}
+
+ subnet_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-subnet-modify', **options)
+
+ if self.parameters.get('gateway'):
+ subnet_modify.add_new_child('gateway', self.parameters.get('gateway'))
+ if self.parameters.get('ip_ranges'):
+ subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
+ subnet_modify.add_child_elem(subnet_ips)
+ for ip_range in self.parameters.get('ip_ranges'):
+ subnet_ips.add_new_child('ip-range', ip_range)
+ if self.parameters.get('ipspace'):
+ subnet_modify.add_new_child('ipspace', self.parameters.get('ipspace'))
+ if self.parameters.get('subnet'):
+ subnet_modify.add_new_child('subnet', self.parameters.get('subnet'))
+
+ try:
+ self.server.invoke_successfully(subnet_modify, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_subnet(self):
+ """
+        Rename a subnet
+ """
+ options = {'subnet-name': self.parameters.get('from_name'),
+ 'new-name': self.parameters.get('name')}
+
+ subnet_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-subnet-rename', **options)
+
+ if self.parameters.get('ipspace'):
+ subnet_rename.add_new_child('ipspace', self.parameters.get('ipspace'))
+
+ try:
+ self.server.invoke_successfully(subnet_rename, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Apply action to subnet'''
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_net_subnet", cserver)
+ current = self.get_subnet()
+ cd_action, rename = None, None
+
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_subnet(self.parameters.get('from_name')), current)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming: subnet %s does not exist" %
+ self.parameters.get('from_name'))
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
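+        # broadcast_domain cannot be changed on an existing subnet, so fail before applying any change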
+ for attribute in modify:
+ if attribute in ['broadcast_domain']:
+ self.module.fail_json(msg='Error modifying subnet %s: cannot modify broadcast_domain parameter.' % self.parameters.get('name'))
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_subnet()
+                # If rename is True, cd_action is None but modify could be True
+ if cd_action == 'create':
+ for attribute in ['subnet', 'broadcast_domain']:
+ if not self.parameters.get(attribute):
+ self.module.fail_json(msg='Error - missing required arguments: %s.' % attribute)
+ self.create_subnet()
+ elif cd_action == 'delete':
+ self.delete_subnet()
+ elif modify:
+ self.modify_subnet()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP Net Subnet object and runs the correct play task
+ """
+ subnet_obj = NetAppOntapSubnet()
+ subnet_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py
new file mode 100644
index 00000000..fe6e9e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_net_vlan
+short_description: NetApp ONTAP network VLAN
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create or Delete a network VLAN
+options:
+ state:
+ description:
+ - Whether the specified network VLAN should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ parent_interface:
+ description:
+ - The interface that hosts the VLAN interface.
+ required: true
+ type: str
+ vlanid:
+ description:
+ - The VLAN id. Ranges from 1 to 4094.
+ required: true
+ type: str
+ node:
+ description:
+ - Node name of VLAN interface.
+ required: true
+ type: str
+notes:
+ - The C(interface_name) option has been removed and should be deleted from playbooks
+'''
+
+EXAMPLES = """
+ - name: create VLAN
+ na_ontap_net_vlan:
+ state: present
+ vlanid: 13
+ node: "{{ vlan node }}"
+ parent_interface: "{{ vlan parent interface name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVlan(object):
+ """
+    Creates and destroys network VLANs
+ """
+ def __init__(self):
+ """
+        Initializes the NetAppOntapVlan class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ parent_interface=dict(required=True, type='str'),
+ vlanid=dict(required=True, type='str'),
+ node=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.parent_interface = p['parent_interface']
+ self.vlanid = p['vlanid']
+ self.node = p['node']
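+        # ONTAP names a VLAN interface '<parent-interface>-<vlanid>'; build that name for lookups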
+ self.interface_name = str(p['parent_interface']) + '-' + str(self.vlanid)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def create_vlan(self):
+ """
+ Creates a new vlan
+ """
+ vlan_obj = netapp_utils.zapi.NaElement("net-vlan-create")
+ vlan_info = self.create_vlan_info()
+
+ vlan_obj.add_child_elem(vlan_info)
+ self.server.invoke_successfully(vlan_obj, True)
+
+ def delete_vlan(self):
+ """
+        Deletes a vlan
+ """
+ vlan_obj = netapp_utils.zapi.NaElement("net-vlan-delete")
+ vlan_info = self.create_vlan_info()
+
+ vlan_obj.add_child_elem(vlan_info)
+ self.server.invoke_successfully(vlan_obj, True)
+
+ def does_vlan_exist(self):
+ """
+ Checks to see if a vlan already exists or not
+        :return: Returns True if the vlan exists, False if it doesn't
+ """
+ vlan_obj = netapp_utils.zapi.NaElement("net-vlan-get")
+ vlan_obj.add_new_child("interface-name", self.interface_name)
+ vlan_obj.add_new_child("node", self.node)
+ try:
+ result = self.server.invoke_successfully(vlan_obj, True)
+ result.get_child_by_name("attributes").get_child_by_name("vlan-info").get_child_by_name("interface-name")
+ except netapp_utils.zapi.NaApiError:
+ return False
+ return True
+
+ def create_vlan_info(self):
+ """
+ Create a vlan_info object to be used in a create/delete
+ :return:
+ """
+ vlan_info = netapp_utils.zapi.NaElement("vlan-info")
+
+ # set up the vlan_info object:
+ vlan_info.add_new_child("parent-interface", self.parent_interface)
+ vlan_info.add_new_child("vlanid", self.vlanid)
+ vlan_info.add_new_child("node", self.node)
+ return vlan_info
+
+ def apply(self):
+ """
+        Check the options in the playbook to determine what needs to be done
+ :return:
+ """
+ changed = False
+ result = None
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_net_vlan", cserver)
+ existing_vlan = self.does_vlan_exist()
+ if existing_vlan:
+ if self.state == 'absent': # delete
+ changed = True
+ else:
+ if self.state == 'present': # create
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ self.create_vlan()
+ elif self.state == 'absent':
+ self.delete_vlan()
+ self.module.exit_json(changed=changed, meta=result)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP VLAN object and runs the correct play task.
+ """
+ v = NetAppOntapVlan()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py
new file mode 100644
index 00000000..c439cba1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py
@@ -0,0 +1,599 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+module: na_ontap_nfs
+short_description: NetApp ONTAP NFS status
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Enable or disable NFS on ONTAP
+options:
+ state:
+ description:
+ - Whether NFS should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ service_state:
+ description:
+    - Whether the specified NFS service should be enabled or disabled. Creates the NFS service if it does not exist.
+ choices: ['started', 'stopped']
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ nfsv3:
+ description:
+ - status of NFSv3.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv3_fsid_change:
+ description:
+    - Whether NFSv3 clients see a change in FSID as they traverse file systems.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv4_fsid_change:
+ description:
+    - Whether NFSv4 clients see a change in FSID as they traverse file systems.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv4:
+ description:
+ - status of NFSv4.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv41:
+ description:
+ - status of NFSv41.
+ aliases: ['nfsv4.1']
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv41_pnfs:
+ description:
+ - status of NFSv41 pNFS.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv4_numeric_ids:
+ description:
+    - status of NFSv4 numeric IDs.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ vstorage_state:
+ description:
+ - status of vstorage_state.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv4_id_domain:
+ description:
+ - Name of the nfsv4_id_domain to use.
+ type: str
+ nfsv40_acl:
+ description:
+ - status of NFS v4.0 ACL feature
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_read_delegation:
+ description:
+ - status for NFS v4.0 read delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_write_delegation:
+ description:
+ - status for NFS v4.0 write delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_acl:
+ description:
+ - status of NFS v4.1 ACL feature
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_read_delegation:
+ description:
+ - status for NFS v4.1 read delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_write_delegation:
+ description:
+ - status for NFS v4.1 write delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_referrals:
+ description:
+ - status for NFS v4.0 referrals.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv41_referrals:
+ description:
+ - status for NFS v4.1 referrals.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ tcp:
+ description:
+ - Enable TCP (support from ONTAP 9.3 onward).
+ choices: ['enabled', 'disabled']
+ type: str
+ udp:
+ description:
+ - Enable UDP (support from ONTAP 9.3 onward).
+ choices: ['enabled', 'disabled']
+ type: str
+ showmount:
+ description:
+ - Whether SVM allows showmount
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ tcp_max_xfer_size:
+ description:
+ - TCP Maximum Transfer Size (bytes). The default value is 65536.
+ version_added: 2.8.0
+ type: int
+
+"""
+
+EXAMPLES = """
+ - name: change nfs status
+ na_ontap_nfs:
+ state: present
+ service_state: stopped
+ vserver: vs_hack
+ nfsv3: disabled
+ nfsv4: disabled
+ nfsv41: enabled
+ tcp: disabled
+ udp: disabled
+ vstorage_state: disabled
+ nfsv4_id_domain: example.com
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNFS(object):
+ """ object initialize and class methods """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, type='str', choices=['started', 'stopped']),
+ vserver=dict(required=True, type='str'),
+ nfsv3=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv3_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv4_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv4=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41=dict(required=False, type='str', default=None, choices=['enabled', 'disabled'], aliases=['nfsv4.1']),
+ nfsv41_pnfs=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv4_numeric_ids=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ vstorage_state=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ tcp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
+ udp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
+ nfsv4_id_domain=dict(required=False, type='str', default=None),
+ nfsv40_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv40_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv40_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv40_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ showmount=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
+ tcp_max_xfer_size=dict(required=False, default=None, type='int')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ parameters = self.module.params
+
+ # set up service_state variables
+ self.state = parameters['state']
+ self.service_state = parameters['service_state']
+ self.vserver = parameters['vserver']
+ self.nfsv3 = parameters['nfsv3']
+ self.nfsv3_fsid_change = parameters['nfsv3_fsid_change']
+ self.nfsv4_fsid_change = parameters['nfsv4_fsid_change']
+ self.nfsv4 = parameters['nfsv4']
+ self.nfsv41 = parameters['nfsv41']
+ self.vstorage_state = parameters['vstorage_state']
+ self.nfsv4_id_domain = parameters['nfsv4_id_domain']
+ self.udp = parameters['udp']
+ self.tcp = parameters['tcp']
+ self.nfsv40_acl = parameters['nfsv40_acl']
+ self.nfsv40_read_delegation = parameters['nfsv40_read_delegation']
+ self.nfsv40_referrals = parameters['nfsv40_referrals']
+ self.nfsv40_write_delegation = parameters['nfsv40_write_delegation']
+ self.nfsv41_acl = parameters['nfsv41_acl']
+ self.nfsv41_read_delegation = parameters['nfsv41_read_delegation']
+ self.nfsv41_referrals = parameters['nfsv41_referrals']
+ self.nfsv41_write_delegation = parameters['nfsv41_write_delegation']
+ self.nfsv41_pnfs = parameters['nfsv41_pnfs']
+ self.nfsv4_numeric_ids = parameters['nfsv4_numeric_ids']
+ self.showmount = parameters['showmount']
+ self.tcp_max_xfer_size = parameters['tcp_max_xfer_size']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_nfs_service(self):
+ """
+ Return details about nfs
+ :param:
+ name : name of the vserver
+ :return: Details about nfs. None if not found.
+ :rtype: dict
+ """
+ nfs_get_iter = netapp_utils.zapi.NaElement('nfs-service-get-iter')
+ nfs_info = netapp_utils.zapi.NaElement('nfs-info')
+ nfs_info.add_new_child('vserver', self.vserver)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(nfs_info)
+ nfs_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(nfs_get_iter, True)
+ nfs_details = None
+        # check if the NFS service exists
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list').get_child_by_name('nfs-info')
+ is_nfsv3_enabled = attributes_list.get_child_content('is-nfsv3-enabled')
+ is_nfsv3_fsid_change_enabled = attributes_list.get_child_content('is-nfsv3-fsid-change-enabled')
+ is_nfsv4_fsid_change_enabled = attributes_list.get_child_content('is-nfsv4-fsid-change-enabled')
+ is_nfsv40_enabled = attributes_list.get_child_content('is-nfsv40-enabled')
+ is_nfsv41_enabled = attributes_list.get_child_content('is-nfsv41-enabled')
+ is_vstorage_enabled = attributes_list.get_child_content('is-vstorage-enabled')
+ nfsv4_id_domain_value = attributes_list.get_child_content('nfsv4-id-domain')
+ is_tcp_enabled = attributes_list.get_child_content('is-tcp-enabled')
+ is_udp_enabled = attributes_list.get_child_content('is-udp-enabled')
+ is_nfsv40_acl_enabled = attributes_list.get_child_content('is-nfsv40-acl-enabled')
+ is_nfsv40_write_delegation_enabled = attributes_list.get_child_content('is-nfsv40-write-delegation-enabled')
+ is_nfsv40_read_delegation_enabled = attributes_list.get_child_content('is-nfsv40-read-delegation-enabled')
+ is_nfsv40_referrals_enabled = attributes_list.get_child_content('is-nfsv40-referrals-enabled')
+ is_nfsv41_acl_enabled = attributes_list.get_child_content('is-nfsv41-acl-enabled')
+ is_nfsv41_write_delegation_enabled = attributes_list.get_child_content('is-nfsv41-write-delegation-enabled')
+ is_nfsv41_read_delegation_enabled = attributes_list.get_child_content('is-nfsv41-read-delegation-enabled')
+ is_nfsv41_referrals_enabled = attributes_list.get_child_content('is-nfsv41-referrals-enabled')
+ is_nfsv41_pnfs_enabled = attributes_list.get_child_content('is-nfsv41-pnfs-enabled')
+ is_nfsv4_numeric_ids_enabled = attributes_list.get_child_content('is-nfsv4-numeric-ids-enabled')
+ is_showmount_enabled = attributes_list.get_child_content('showmount')
+ tcp_max_xfer_size = attributes_list.get_child_content('tcp-max-xfer-size')
+ nfs_details = {
+ 'is_nfsv3_enabled': is_nfsv3_enabled,
+ 'is_nfsv3_fsid_change_enabled': is_nfsv3_fsid_change_enabled,
+ 'is_nfsv4_fsid_change_enabled': is_nfsv4_fsid_change_enabled,
+ 'is_nfsv40_enabled': is_nfsv40_enabled,
+ 'is_nfsv41_enabled': is_nfsv41_enabled,
+ 'is_nfsv41_pnfs_enabled': is_nfsv41_pnfs_enabled,
+ 'is_nfsv4_numeric_ids_enabled': is_nfsv4_numeric_ids_enabled,
+ 'is_vstorage_enabled': is_vstorage_enabled,
+ 'nfsv4_id_domain': nfsv4_id_domain_value,
+ 'is_tcp_enabled': is_tcp_enabled,
+ 'is_udp_enabled': is_udp_enabled,
+ 'is_nfsv40_acl_enabled': is_nfsv40_acl_enabled,
+ 'is_nfsv40_read_delegation_enabled': is_nfsv40_read_delegation_enabled,
+ 'is_nfsv40_referrals_enabled': is_nfsv40_referrals_enabled,
+ 'is_nfsv40_write_delegation_enabled': is_nfsv40_write_delegation_enabled,
+ 'is_nfsv41_acl_enabled': is_nfsv41_acl_enabled,
+ 'is_nfsv41_read_delegation_enabled': is_nfsv41_read_delegation_enabled,
+ 'is_nfsv41_referrals_enabled': is_nfsv41_referrals_enabled,
+ 'is_nfsv41_write_delegation_enabled': is_nfsv41_write_delegation_enabled,
+ 'is_showmount_enabled': is_showmount_enabled,
+ 'tcp_max_xfer_size': tcp_max_xfer_size
+ }
+ return nfs_details
+
+ def get_nfs_status(self):
+ """
+ Return status of nfs
+ :param:
+ name : Name of the vserver
+ :return: status of nfs. None if not found.
+ :rtype: bool
+ """
+ nfs_status = netapp_utils.zapi.NaElement('nfs-status')
+ result = self.server.invoke_successfully(nfs_status, True)
+ return_value = result.get_child_content('is-enabled')
+
+ return return_value
+
+ def enable_nfs(self):
+ """
+ enable nfs (online). If the NFS service was not explicitly created,
+ this API will create one with default options.
+ """
+ nfs_enable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-enable')
+ try:
+ self.server.invoke_successfully(nfs_enable,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' %
+ (self.vserver, self.service_state, to_native(error)),
+ exception=traceback.format_exc())
+
+ def disable_nfs(self):
+ """
+ disable nfs (offline).
+ """
+ nfs_disable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-disable')
+ try:
+ self.server.invoke_successfully(nfs_disable,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' %
+ (self.vserver, self.service_state, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_nfs(self):
+ """
+ modify nfs service
+ """
+ nfs_modify = netapp_utils.zapi.NaElement('nfs-service-modify')
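+        # translate each 'enabled'/'disabled' option into the ZAPI 'true'/'false' string; unset options are left untouched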
+ if self.nfsv3 == 'enabled':
+ nfs_modify.add_new_child('is-nfsv3-enabled', 'true')
+ elif self.nfsv3 == 'disabled':
+ nfs_modify.add_new_child('is-nfsv3-enabled', 'false')
+ if self.nfsv3_fsid_change == 'enabled':
+ nfs_modify.add_new_child('is-nfsv3-fsid-change-enabled', 'true')
+ elif self.nfsv3_fsid_change == 'disabled':
+ nfs_modify.add_new_child('is-nfsv3-fsid-change-enabled', 'false')
+ if self.nfsv4_fsid_change == 'enabled':
+ nfs_modify.add_new_child('is-nfsv4-fsid-change-enabled', 'true')
+ elif self.nfsv4_fsid_change == 'disabled':
+ nfs_modify.add_new_child('is-nfsv4-fsid-change-enabled', 'false')
+ if self.nfsv4 == 'enabled':
+ nfs_modify.add_new_child('is-nfsv40-enabled', 'true')
+ elif self.nfsv4 == 'disabled':
+ nfs_modify.add_new_child('is-nfsv40-enabled', 'false')
+ if self.nfsv41 == 'enabled':
+ nfs_modify.add_new_child('is-nfsv41-enabled', 'true')
+ elif self.nfsv41 == 'disabled':
+ nfs_modify.add_new_child('is-nfsv41-enabled', 'false')
+ if self.vstorage_state == 'enabled':
+ nfs_modify.add_new_child('is-vstorage-enabled', 'true')
+ elif self.vstorage_state == 'disabled':
+ nfs_modify.add_new_child('is-vstorage-enabled', 'false')
+ if self.tcp == 'enabled':
+ nfs_modify.add_new_child('is-tcp-enabled', 'true')
+ elif self.tcp == 'disabled':
+ nfs_modify.add_new_child('is-tcp-enabled', 'false')
+ if self.udp == 'enabled':
+ nfs_modify.add_new_child('is-udp-enabled', 'true')
+ elif self.udp == 'disabled':
+ nfs_modify.add_new_child('is-udp-enabled', 'false')
+ if self.nfsv40_acl == 'enabled':
+ nfs_modify.add_new_child('is-nfsv40-acl-enabled', 'true')
+ elif self.nfsv40_acl == 'disabled':
+ nfs_modify.add_new_child('is-nfsv40-acl-enabled', 'false')
+ if self.nfsv40_read_delegation == 'enabled':
+ nfs_modify.add_new_child('is-nfsv40-read-delegation-enabled', 'true')
+ elif self.nfsv40_read_delegation == 'disabled':
+ nfs_modify.add_new_child('is-nfsv40-read-delegation-enabled', 'false')
+ if self.nfsv40_referrals == 'enabled':
+ nfs_modify.add_new_child('is-nfsv40-referrals-enabled', 'true')
+ elif self.nfsv40_referrals == 'disabled':
+ nfs_modify.add_new_child('is-nfsv40-referrals-enabled', 'false')
+ if self.nfsv40_write_delegation == 'enabled':
+ nfs_modify.add_new_child('is-nfsv40-write-delegation-enabled', 'true')
+ elif self.nfsv40_write_delegation == 'disabled':
+ nfs_modify.add_new_child('is-nfsv40-write-delegation-enabled', 'false')
+ if self.nfsv41_acl == 'enabled':
+ nfs_modify.add_new_child('is-nfsv41-acl-enabled', 'true')
+ elif self.nfsv41_acl == 'disabled':
+ nfs_modify.add_new_child('is-nfsv41-acl-enabled', 'false')
+ if self.nfsv41_read_delegation == 'enabled':
+ nfs_modify.add_new_child('is-nfsv41-read-delegation-enabled', 'true')
+ elif self.nfsv41_read_delegation == 'disabled':
+ nfs_modify.add_new_child('is-nfsv41-read-delegation-enabled', 'false')
+ if self.nfsv41_referrals == 'enabled':
+ nfs_modify.add_new_child('is-nfsv41-referrals-enabled', 'true')
+ elif self.nfsv41_referrals == 'disabled':
+ nfs_modify.add_new_child('is-nfsv41-referrals-enabled', 'false')
+ if self.nfsv41_write_delegation == 'enabled':
+ nfs_modify.add_new_child('is-nfsv41-write-delegation-enabled', 'true')
+ elif self.nfsv41_write_delegation == 'disabled':
+ nfs_modify.add_new_child('is-nfsv41-write-delegation-enabled', 'false')
+ if self.nfsv41_pnfs == 'enabled':
+ nfs_modify.add_new_child('is-nfsv41-pnfs-enabled', 'true')
+ elif self.nfsv41_pnfs == 'disabled':
+ nfs_modify.add_new_child('is-nfsv41-pnfs-enabled', 'false')
+ if self.nfsv4_numeric_ids == 'enabled':
+ nfs_modify.add_new_child('is-nfsv4-numeric-ids-enabled', 'true')
+ elif self.nfsv4_numeric_ids == 'disabled':
+ nfs_modify.add_new_child('is-nfsv4-numeric-ids-enabled', 'false')
+ if self.showmount == 'enabled':
+ nfs_modify.add_new_child('showmount', 'true')
+ elif self.showmount == 'disabled':
+ nfs_modify.add_new_child('showmount', 'false')
+ if self.tcp_max_xfer_size is not None:
+ nfs_modify.add_new_child('tcp-max-xfer-size', str(self.tcp_max_xfer_size))
+ try:
+ self.server.invoke_successfully(nfs_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying nfs: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_nfsv4_id_domain(self):
+ """
+        modify the nfsv4 id domain
+ """
+ nfsv4_id_domain_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'nfs-service-modify', **{'nfsv4-id-domain': self.nfsv4_id_domain})
+ if nfsv4_id_domain_modify is not None:
+ try:
+ self.server.invoke_successfully(nfsv4_id_domain_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying nfs: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_nfs(self):
+ """
+ delete nfs service.
+ """
+ nfs_delete = netapp_utils.zapi.NaElement.create_node_with_children('nfs-service-destroy')
+ try:
+ self.server.invoke_successfully(nfs_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting nfs: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """Apply action to nfs"""
+ changed = False
+ nfs_exists = False
+ modify_nfs = False
+ enable_nfs = False
+ disable_nfs = False
+ netapp_utils.ems_log_event("na_ontap_nfs", self.server)
+ nfs_enabled = self.get_nfs_status()
+ nfs_service_details = self.get_nfs_service()
+ is_nfsv4_id_domain_changed = False
+
+ def state_changed(expected, current):
+ if expected == "enabled" and current == "true":
+ return False
+ if expected == "disabled" and current == "false":
+ return False
+ return True
+
+ def is_modify_needed():
+ if (((self.nfsv3 is not None) and state_changed(self.nfsv3, nfs_service_details['is_nfsv3_enabled'])) or
+ ((self.nfsv3_fsid_change is not None) and state_changed(self.nfsv3_fsid_change, nfs_service_details['is_nfsv3_fsid_change_enabled'])) or
+ ((self.nfsv4_fsid_change is not None) and state_changed(self.nfsv4_fsid_change, nfs_service_details['is_nfsv4_fsid_change_enabled'])) or
+ ((self.nfsv4 is not None) and state_changed(self.nfsv4, nfs_service_details['is_nfsv40_enabled'])) or
+ ((self.nfsv41 is not None) and state_changed(self.nfsv41, nfs_service_details['is_nfsv41_enabled'])) or
+ ((self.nfsv41_pnfs is not None) and state_changed(self.nfsv41_pnfs, nfs_service_details['is_nfsv41_pnfs_enabled'])) or
+ ((self.nfsv4_numeric_ids is not None) and state_changed(self.nfsv4_numeric_ids, nfs_service_details['is_nfsv4_numeric_ids_enabled'])) or
+ ((self.tcp is not None) and state_changed(self.tcp, nfs_service_details['is_tcp_enabled'])) or
+ ((self.udp is not None) and state_changed(self.udp, nfs_service_details['is_udp_enabled'])) or
+ ((self.nfsv40_acl is not None) and state_changed(self.nfsv40_acl, nfs_service_details['is_nfsv40_acl_enabled'])) or
+ ((self.nfsv40_read_delegation is not None) and state_changed(self.nfsv40_read_delegation,
+ nfs_service_details['is_nfsv40_read_delegation_enabled'])) or
+ ((self.nfsv40_write_delegation is not None) and state_changed(self.nfsv40_write_delegation,
+ nfs_service_details['is_nfsv40_write_delegation_enabled'])) or
+ ((self.nfsv41_acl is not None) and state_changed(self.nfsv41_acl, nfs_service_details['is_nfsv41_acl_enabled'])) or
+ ((self.nfsv41_read_delegation is not None) and state_changed(self.nfsv41_read_delegation,
+ nfs_service_details['is_nfsv41_read_delegation_enabled'])) or
+ ((self.nfsv41_write_delegation is not None) and state_changed(self.nfsv41_write_delegation,
+ nfs_service_details['is_nfsv41_write_delegation_enabled'])) or
+ ((self.nfsv40_referrals is not None) and state_changed(self.nfsv40_referrals,
+ nfs_service_details['is_nfsv40_referrals_enabled'])) or
+ ((self.nfsv41_referrals is not None) and state_changed(self.nfsv41_referrals,
+ nfs_service_details['is_nfsv41_referrals_enabled'])) or
+ ((self.showmount is not None) and state_changed(self.showmount, nfs_service_details['is_showmount_enabled'])) or
+ ((self.vstorage_state is not None) and state_changed(self.vstorage_state, nfs_service_details['is_vstorage_enabled'])) or
+ ((self.tcp_max_xfer_size is not None) and int(self.tcp_max_xfer_size) != int(nfs_service_details['tcp_max_xfer_size']))):
+ return True
+ return False
+
+ def is_domain_changed():
+ if (self.nfsv4_id_domain is not None) and (self.nfsv4_id_domain != nfs_service_details['nfsv4_id_domain']):
+ return True
+ return False
+
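+        # determine which operations are needed; the actual calls below are skipped in check mode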
+ if nfs_service_details:
+ nfs_exists = True
+ if self.state == 'absent': # delete
+ changed = True
+ elif self.state == 'present': # modify
+ if self.service_state == 'started' and nfs_enabled == 'false':
+ enable_nfs = True
+ changed = True
+ elif self.service_state == 'stopped' and nfs_enabled == 'true':
+ disable_nfs = True
+ changed = True
+ if is_modify_needed():
+ modify_nfs = True
+ changed = True
+ if is_domain_changed():
+ is_nfsv4_id_domain_changed = True
+ changed = True
+ else:
+ if self.state == 'present': # create
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present': # execute create
+ if not nfs_exists:
+ self.enable_nfs()
+ nfs_service_details = self.get_nfs_service()
+ if self.service_state == 'stopped':
+ self.disable_nfs()
+ if is_modify_needed():
+ self.modify_nfs()
+ if is_domain_changed():
+ self.modify_nfsv4_id_domain()
+ else:
+ if enable_nfs:
+ self.enable_nfs()
+ elif disable_nfs:
+ self.disable_nfs()
+ if modify_nfs:
+ self.modify_nfs()
+ if is_nfsv4_id_domain_changed:
+ self.modify_nfsv4_id_domain()
+ elif self.state == 'absent': # execute delete
+ self.delete_nfs()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """ Create object and call apply """
+ obj = NetAppONTAPNFS()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py
new file mode 100644
index 00000000..89f6e98f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_node
+short_description: NetApp ONTAP Rename a node.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Rename an ONTAP node.
+options:
+ name:
+ description:
+ - The new name for the node
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - The name of the node to be renamed. If I(name) already exists, no action will be performed.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: rename node
+ na_ontap_node:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ from_name: laurentn-vsim1
+ name: laurentncluster-2
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNode(object):
+ """
+ Rename node
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ from_name=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def rename_node(self):
+ """
+ Rename an existing node
+ :return: none
+ """
+ node_obj = netapp_utils.zapi.NaElement('system-node-rename')
+ node_obj.add_new_child('node', self.parameters['from_name'])
+ node_obj.add_new_child('new-name', self.parameters['name'])
+ try:
+ self.cluster.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error renaming node: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_node(self, name):
+ node_obj = netapp_utils.zapi.NaElement('system-node-get')
+ node_obj.add_new_child('node', name)
+ try:
+ self.cluster.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "13115":
+ # 13115 (EINVALIDINPUTERROR) if the node does not exist
+ return None
+ else:
+ self.module.fail_json(msg=to_native(
+ error), exception=traceback.format_exc())
+ return True
+
+ def apply(self):
+ # logging ems event
+ results = netapp_utils.get_cserver(self.cluster)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_node", cserver)
+
+ exists = self.get_node(self.parameters['name'])
+ from_exists = self.get_node(self.parameters['from_name'])
+ changed = False
+ if exists:
+ pass
+ else:
+ if from_exists:
+ if not self.module.check_mode:
+ self.rename_node()
+ changed = True
+ else:
+ self.module.fail_json(msg='Error renaming node, from_name %s does not exist' % self.parameters['from_name'])
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP Node object and runs the correct play task.
+ """
+ obj = NetAppOntapNode()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py
new file mode 100644
index 00000000..0b535992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+module: na_ontap_ntfs_dacl
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete or modify NTFS DACL (discretionary access control list)
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+- Create, modify, or destroy an NTFS DACL
+
+options:
+ state:
+ description:
+ - Whether the specified NTFS DACL should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the NTFS DACL.
+ required: true
+ type: str
+
+ security_descriptor:
+ description:
+ - Specifies the NTFS security descriptor.
+ required: true
+ type: str
+
+ access_type:
+ description:
+    - Specifies the DACL ACE's access type.
+ choices: ['allow', 'deny']
+ required: true
+ type: str
+
+ account:
+ description:
+    - Specifies the DACL ACE's SID or domain account name for the NTFS security descriptor.
+ required: true
+ type: str
+
+ rights:
+ description:
+ - Specifies DACL ACE's access rights. Mutually exclusive with advanced_access_rights.
+ choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write']
+ type: str
+
+ apply_to:
+ description:
+    - Specifies where to apply the DACL entry.
+ choices: ['this_folder', 'sub_folders', 'files']
+ type: list
+ elements: str
+
+ advanced_access_rights:
+ description:
+ - Specifies DACL ACE's Advanced access rights. Mutually exclusive with rights.
+ choices: ['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea', 'execute_file', 'delete_child',
+ 'read_attr', 'write_attr', 'delete', 'read_perm', 'write_perm', 'write_owner', 'full_control']
+ type: list
+ elements: str
+
+"""
+
+EXAMPLES = """
+ - name: Add NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: present
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ access_type: allow
+ account: DOMAIN\\Account
+ rights: modify
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+
+ - name: Modify NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: present
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ access_type: full_control
+ account: DOMAIN\\Account
+ rights: modify
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Remove NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: absent
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ account: DOMAIN\\Account
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNtfsDacl(object):
+ """
+ Creates, Modifies and Destroys an NTFS DACL
+ """
+
+ def __init__(self):
+ """
+ Initialize the Ontap NTFS DACL class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ security_descriptor=dict(required=True, type='str'),
+ access_type=dict(required=True, choices=['allow', 'deny'], type='str'),
+ account=dict(required=True, type='str'),
+ rights=dict(required=False,
+ choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'],
+ type='str'),
+ apply_to=dict(required=False, choices=['this_folder', 'sub_folders', 'files'], type='list', elements='str'),
+ advanced_access_rights=dict(required=False,
+ choices=['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea',
+ 'execute_file', 'delete_child', 'read_attr', 'write_attr', 'delete',
+ 'read_perm', 'write_perm', 'write_owner', 'full_control'],
+ type='list', elements='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[('rights', 'advanced_access_rights')],
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_dacl(self):
+
+ dacl_entry = None
+ advanced_access_list = None
+
+ dacl_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl-get-iter')
+ dacl_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl')
+ dacl_info.add_new_child('vserver', self.parameters['vserver'])
+ dacl_info.add_new_child('ntfs-sd', self.parameters['security_descriptor'])
+ dacl_info.add_new_child('access-type', self.parameters['access_type'])
+ dacl_info.add_new_child('account', self.parameters['account'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(dacl_info)
+ dacl_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(dacl_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching %s DACL for account %s for security descriptor %s: %s' % (
+ self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'],
+ to_native(error)), exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+
+ if attributes_list is None:
+ return None
+
+ dacl = attributes_list.get_child_by_name('file-directory-security-ntfs-dacl')
+
+ apply_to_list = []
+ apply_to = dacl.get_child_by_name('apply-to')
+ for apply_child in apply_to.get_children():
+ inheritance_level = apply_child.get_content()
+
+ apply_to_list.append(inheritance_level)
+
+ if dacl.get_child_by_name('advanced-rights'):
+
+ advanced_access_list = []
+ advanced_access = dacl.get_child_by_name('advanced-rights')
+ for right in advanced_access.get_children():
+ advanced_access_right = right.get_content()
+ advanced_right = {
+ 'advanced_access_rights': advanced_access_right
+ }
+ advanced_access_list.append(advanced_right)
+
+ dacl_entry = {
+ 'access_type': dacl.get_child_content('access-type'),
+ 'account': dacl.get_child_content('account'),
+ 'apply_to': apply_to_list,
+ 'security_descriptor': dacl.get_child_content('ntfs-sd'),
+ 'readable_access_rights': dacl.get_child_content('readable-access-rights'),
+ 'vserver': dacl.get_child_content('vserver'),
+ }
+
+ if advanced_access_list is not None:
+ dacl_entry['advanced_rights'] = advanced_access_list
+ else:
+ dacl_entry['rights'] = dacl.get_child_content('rights')
+ return dacl_entry
+
+ def add_dacl(self):
+ """
+ Adds a new NTFS DACL to an existing NTFS security descriptor
+ """
+
+ dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-add")
+ dacl_obj.add_new_child("access-type", self.parameters['access_type'])
+ dacl_obj.add_new_child("account", self.parameters['account'])
+ dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])
+
+ if 'rights' not in self.parameters.keys() and 'advanced_access_rights' not in self.parameters.keys():
+ self.module.fail_json(msg='Either rights or advanced_access_rights must be specified.')
+
+ if self.parameters.get('apply_to'):
+ apply_to_obj = netapp_utils.zapi.NaElement("apply-to")
+
+ for apply_entry in self.parameters['apply_to']:
+ apply_to_obj.add_new_child('inheritance-level', apply_entry)
+ dacl_obj.add_child_elem(apply_to_obj)
+
+ if self.parameters.get('advanced_access_rights'):
+ access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights")
+
+ for right in self.parameters['advanced_access_rights']:
+ access_rights_obj.add_new_child('advanced-access-rights', right)
+
+ dacl_obj.add_child_elem(access_rights_obj)
+
+ if self.parameters.get('rights'):
+ dacl_obj.add_new_child("rights", self.parameters['rights'])
+
+ try:
+ self.server.invoke_successfully(dacl_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding %s DACL for account %s for security descriptor %s: %s' % (
+ self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_dacl(self):
+ """
+ Deletes an NTFS DACL from an existing NTFS security descriptor
+ """
+ dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-remove")
+ dacl_obj.add_new_child("access-type", self.parameters['access_type'])
+ dacl_obj.add_new_child("account", self.parameters['account'])
+ dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])
+
+ try:
+ self.server.invoke_successfully(dacl_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting %s DACL for account %s for security descriptor %s: %s' % (
+ self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_dacl(self):
+ """
+ Modifies an NTFS DACL on an existing NTFS security descriptor
+ """
+
+ dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-modify")
+ dacl_obj.add_new_child("access-type", self.parameters['access_type'])
+ dacl_obj.add_new_child("account", self.parameters['account'])
+ dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])
+
+ if self.parameters.get('apply_to'):
+ apply_to_obj = netapp_utils.zapi.NaElement("apply-to")
+
+ for apply_entry in self.parameters['apply_to']:
+ apply_to_obj.add_new_child('inheritance-level', apply_entry)
+ dacl_obj.add_child_elem(apply_to_obj)
+
+ if self.parameters.get('advanced_access_rights'):
+ access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights")
+
+ for right in self.parameters['advanced_access_rights']:
+ access_rights_obj.add_new_child('advanced-access-rights', right)
+
+ dacl_obj.add_child_elem(access_rights_obj)
+
+ if self.parameters.get('rights'):
+ dacl_obj.add_new_child("rights", self.parameters['rights'])
+
+ try:
+ self.server.invoke_successfully(dacl_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying %s DACL for account %s for security descriptor %s: %s' % (
+ self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_ntfs_dacl", cserver)
+
+ def apply(self):
+ self.autosupport_log()
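+ # Idempotency flow: get_cd_action() compares the existing DACL (if any) with
+ # the requested state and returns 'create', 'delete' or None; when neither
+ # applies, get_modified_attributes() reports only the attributes that differ
+ # and drives a modify.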
+ current, modify = self.get_dacl(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.add_dacl()
+ elif cd_action == 'delete':
+ self.remove_dacl()
+ elif modify:
+ self.modify_dacl()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap NTFS DACL object and runs the correct play task
+ """
+ obj = NetAppOntapNtfsDacl()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py
new file mode 100644
index 00000000..fa517333
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+
+module: na_ontap_ntfs_sd
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete or modify NTFS security descriptor
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Create, modify or destroy NTFS security descriptor
+
+options:
+ state:
+ description:
+ - Whether the specified NTFS security descriptor should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the NTFS security descriptor.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies the NTFS security descriptor name. Not modifiable.
+ required: true
+ type: str
+
+ owner:
+ description:
+ - Specifies the owner's SID or domain account of the NTFS security descriptor.
+ - Need to provide the full path of the owner.
+ type: str
+
+ group:
+ description:
+ - Specifies the group's SID or domain account of the NTFS security descriptor.
+ - Need to provide the full path of the group.
+ required: false
+ type: str
+
+ control_flags_raw:
+ description:
+ - Specifies the security descriptor control flags.
+ - 1... .... .... .... = Self Relative
+ - .0.. .... .... .... = RM Control Valid
+ - ..0. .... .... .... = SACL Protected
+ - ...0 .... .... .... = DACL Protected
+ - .... 0... .... .... = SACL Inherited
+ - .... .0.. .... .... = DACL Inherited
+ - .... ..0. .... .... = SACL Inherit Required
+ - .... ...0 .... .... = DACL Inherit Required
+ - .... .... ..0. .... = SACL Defaulted
+ - .... .... ...0 .... = SACL Present
+ - .... .... .... 0... = DACL Defaulted
+ - .... .... .... .1.. = DACL Present
+ - .... .... .... ..0. = Group Defaulted
+ - .... .... .... ...0 = Owner Defaulted
+ - At present only the following flags are honored. Others are ignored.
+ - ..0. .... .... .... = SACL Protected
+ - ...0 .... .... .... = DACL Protected
+ - .... .... ..0. .... = SACL Defaulted
+ - .... .... .... 0... = DACL Defaulted
+ - .... .... .... ..0. = Group Defaulted
+ - .... .... .... ...0 = Owner Defaulted
+ - Convert the 16-bit binary flag word to decimal for the input.
+ - An illustrative conversion is shown in the comment following this documentation block.
+ type: int
+
+"""
+
+EXAMPLES = """
+ - name: Create NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: present
+ vserver: SVM1
+ name: ansible_sd
+ owner: DOMAIN\\Account
+ group: DOMAIN\\Group
+ control_flags_raw: 0
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: present
+ vserver: SVM1
+ name: ansible_sd
+ owner: DOMAIN\\Account
+ group: DOMAIN\\Group
+ control_flags_raw: 0
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: absent
+ vserver: SVM1
+ name: ansible_sd
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNtfsSd(object):
+ """
+ Creates, Modifies and Destroys an NTFS security descriptor
+ """
+
+ def __init__(self):
+ """
+ Initialize the Ontap NTFS Security Descriptor class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ owner=dict(required=False, type='str'),
+ group=dict(required=False, type='str'),
+ control_flags_raw=dict(required=False, type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_ntfs_sd(self):
+
+ ntfs_sd_entry, result = None, None
+
+ ntfs_sd_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-get-iter')
+ ntfs_sd_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs')
+ ntfs_sd_info.add_new_child('vserver', self.parameters['vserver'])
+ ntfs_sd_info.add_new_child('ntfs-sd', self.parameters['name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(ntfs_sd_info)
+ ntfs_sd_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(ntfs_sd_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching NTFS security descriptor %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ ntfs_sd = attributes_list.get_child_by_name('file-directory-security-ntfs')
+ ntfs_sd_entry = {
+ 'vserver': ntfs_sd.get_child_content('vserver'),
+ 'name': ntfs_sd.get_child_content('ntfs-sd'),
+ 'owner': ntfs_sd.get_child_content('owner'),
+ 'group': ntfs_sd.get_child_content('group'),
+ 'control_flags_raw': ntfs_sd.get_child_content('control-flags-raw'),
+ }
+ if ntfs_sd_entry.get('control_flags_raw'):
+ ntfs_sd_entry['control_flags_raw'] = int(ntfs_sd_entry['control_flags_raw'])
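+ # The ZAPI returns control-flags-raw as a string; the cast above keeps the
+ # comparison with the integer control_flags_raw module parameter consistent.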
+ return ntfs_sd_entry
+ return None
+
+ def add_ntfs_sd(self):
+ """
+ Adds a new NTFS security descriptor
+ """
+
+ ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-create")
+ ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
+
+ if self.parameters.get('control_flags_raw') is not None:
+ ntfs_sd_obj.add_new_child("control-flags-raw", str(self.parameters['control_flags_raw']))
+
+ if self.parameters.get('owner'):
+ ntfs_sd_obj.add_new_child("owner", self.parameters['owner'])
+
+ if self.parameters.get('group'):
+ ntfs_sd_obj.add_new_child("group", self.parameters['group'])
+
+ try:
+ self.server.invoke_successfully(ntfs_sd_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error creating NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_ntfs_sd(self):
+ """
+ Deletes an NTFS security descriptor
+ """
+ ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-delete")
+ ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
+ try:
+ self.server.invoke_successfully(ntfs_sd_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_ntfs_sd(self):
+ """
+ Modifies an NTFS security descriptor
+ """
+
+ ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-modify")
+ ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
+
+ if self.parameters.get('control_flags_raw') is not None:
+ ntfs_sd_obj.add_new_child('control-flags-raw', str(self.parameters['control_flags_raw']))
+
+ if self.parameters.get('owner'):
+ ntfs_sd_obj.add_new_child('owner', self.parameters['owner'])
+
+ if self.parameters.get('group'):
+ ntfs_sd_obj.add_new_child('group', self.parameters['group'])
+
+ try:
+ self.server.invoke_successfully(ntfs_sd_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error modifying NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_ntfs_sd", self.server)
+ current, modify = self.get_ntfs_sd(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.add_ntfs_sd()
+ elif cd_action == 'delete':
+ self.remove_ntfs_sd()
+ elif modify:
+ self.modify_ntfs_sd()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates, deletes and modifies an NTFS security descriptor
+ """
+ obj = NetAppOntapNtfsSd()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py
new file mode 100644
index 00000000..e7f88518
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = """
+module: na_ontap_ntp
+short_description: NetApp ONTAP NTP server
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify an NTP server in ONTAP.
+options:
+ state:
+ description:
+ - Whether the specified NTP server should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ server_name:
+ description:
+ - The name of the NTP server to manage.
+ required: True
+ type: str
+ version:
+ description:
+ - Specify the NTP protocol version to use for the server.
+ choices: ['auto', '3', '4']
+ default: 'auto'
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Create NTP server
+ na_ontap_ntp:
+ state: present
+ version: auto
+ server_name: "{{ server_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete NTP server
+ na_ontap_ntp:
+ state: absent
+ server_name: "{{ server_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNTPServer(object):
+ """ Object initialization and class methods """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ server_name=dict(required=True, type='str'),
+ version=dict(required=False, type='str', default='auto',
+ choices=['auto', '3', '4']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ parameters = self.module.params
+
+ # set up state variables
+ self.state = parameters['state']
+ self.server_name = parameters['server_name']
+ self.version = parameters['version']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_ntp_server(self):
+ """
+ Return details about the ntp server
+ :param name: Name of the NTP server
+ :return: Details about the ntp server. None if not found.
+ :rtype: dict
+ """
+ ntp_iter = netapp_utils.zapi.NaElement('ntp-server-get-iter')
+ ntp_info = netapp_utils.zapi.NaElement('ntp-server-info')
+ ntp_info.add_new_child('server-name', self.server_name)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(ntp_info)
+
+ ntp_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(ntp_iter, True)
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+
+ ntp_server_name = result.get_child_by_name('attributes-list').\
+ get_child_by_name('ntp-server-info').\
+ get_child_content('server-name')
+ server_version = result.get_child_by_name('attributes-list').\
+ get_child_by_name('ntp-server-info').\
+ get_child_content('version')
+ return_value = {
+ 'server-name': ntp_server_name,
+ 'version': server_version
+ }
+
+ return return_value
+
+ def create_ntp_server(self):
+ """
+ create ntp server.
+ """
+ ntp_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'ntp-server-create', **{'server-name': self.server_name,
+ 'version': self.version
+ })
+
+ try:
+ self.server.invoke_successfully(ntp_server_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating ntp server %s: %s'
+ % (self.server_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_ntp_server(self):
+ """
+ delete ntp server.
+ """
+ ntp_server_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'ntp-server-delete', **{'server-name': self.server_name})
+
+ try:
+ self.server.invoke_successfully(ntp_server_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting ntp server %s: %s'
+ % (self.server_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_version(self):
+ """
+ modify the version.
+ """
+ ntp_modify_version = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'ntp-server-modify',
+ **{'server-name': self.server_name, 'version': self.version})
+ try:
+ self.server.invoke_successfully(ntp_modify_version,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying version for ntp server %s: %s'
+ % (self.server_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """Apply action to ntp-server"""
+
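+ # Decision logic: delete when the server exists and state is 'absent', modify
+ # when it exists with a different version, create when it does not exist and
+ # state is 'present'. In check mode the change is reported without invoking
+ # any ZAPI call.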
+ changed = False
+ ntp_modify = False
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_ntp", cserver)
+ ntp_server_details = self.get_ntp_server()
+ if ntp_server_details is not None:
+ if self.state == 'absent': # delete
+ changed = True
+ elif self.state == 'present' and self.version:
+ # modify version
+ if self.version != ntp_server_details['version']:
+ ntp_modify = True
+ changed = True
+ else:
+ if self.state == 'present': # create
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if ntp_server_details is None:
+ self.create_ntp_server()
+ elif ntp_modify:
+ self.modify_version()
+ elif self.state == 'absent':
+ self.delete_ntp_server()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """ Create object and call apply """
+ ntp_obj = NetAppOntapNTPServer()
+ ntp_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py
new file mode 100644
index 00000000..055889e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVMe Service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified NVMe should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ status_admin:
+ description:
+ - Whether the status of NVMe should be up or down
+ type: bool
+short_description: "NetApp ONTAP Manage NVMe Service"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVMe
+ na_ontap_nvme:
+ state: present
+ status_admin: False
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NVMe
+ na_ontap_nvme:
+ state: present
+ status_admin: True
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete NVMe
+ na_ontap_nvme:
+ state: absent
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNVMe(object):
+ """
+ Class with NVMe service methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ status_admin=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_nvme(self):
+ """
+ Get current nvme details
+ :return: dict if nvme exists, None otherwise
+ """
+ nvme_get = netapp_utils.zapi.NaElement('nvme-get-iter')
+ query = {
+ 'query': {
+ 'nvme-target-service-info': {
+ 'vserver': self.parameters['vserver']
+ }
+ }
+ }
+ nvme_get.translate_struct(query)
+ try:
+ result = self.server.invoke_successfully(nvme_get, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching nvme info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ nvme_info = attributes_list.get_child_by_name('nvme-target-service-info')
+ return_value = {'status_admin': nvme_info.get_child_content('is-available')}
+ return return_value
+ return None
+
+ def create_nvme(self):
+ """
+ Create NVMe service
+ """
+ nvme_create = netapp_utils.zapi.NaElement('nvme-create')
+ if self.parameters.get('status_admin') is not None:
+ options = {'is-available': self.parameters['status_admin']}
+ nvme_create.translate_struct(options)
+ try:
+ self.server.invoke_successfully(nvme_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating nvme for vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_nvme(self):
+ """
+ Delete NVMe service
+ """
+ nvme_delete = netapp_utils.zapi.NaElement('nvme-delete')
+ try:
+ self.server.invoke_successfully(nvme_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting nvme for vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_nvme(self, status=None):
+ """
+ Modify NVMe service
+ """
+ if status is None:
+ status = self.parameters['status_admin']
+ options = {'is-available': status}
+ nvme_modify = netapp_utils.zapi.NaElement('nvme-modify')
+ nvme_modify.translate_struct(options)
+ try:
+ self.server.invoke_successfully(nvme_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying nvme for vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to NVMe service
+ """
+ netapp_utils.ems_log_event("na_ontap_nvme", self.server)
+ current = self.get_nvme()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
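+ # get_nvme() reports 'is-available' as the ZAPI string 'true'/'false', so the
+ # boolean status_admin parameter is converted to the same string form below
+ # before get_modified_attributes() compares current and desired values.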
+ if self.parameters.get('status_admin') is not None:
+ self.parameters['status_admin'] = self.na_helper.get_value_for_bool(False, self.parameters['status_admin'])
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_nvme()
+ elif cd_action == 'delete':
+ # NVMe status_admin needs to be down before deleting it
+ self.modify_nvme('false')
+ self.delete_nvme()
+ elif modify:
+ self.modify_nvme()
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPNVMe()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py
new file mode 100644
index 00000000..e58ea581
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVME namespace
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme_namespace
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified namespace should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ ostype:
+ description:
+ - Specifies the ostype for initiators
+ choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v']
+ type: str
+ size:
+ description:
+ - Size in bytes.
+ Range is [0..2^63-1].
+ type: int
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'b'
+ path:
+ description:
+ - Namespace path.
+ required: true
+ type: str
+ block_size:
+ description:
+ - Size in bytes of a logical block. Possible values are 512 (Data ONTAP 9.6 and later), 4096. The default value is 4096.
+ choices: [512, 4096]
+ type: int
+ version_added: '20.5.0'
+short_description: "NetApp ONTAP Manage NVME Namespace"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVME Namespace
+ na_ontap_nvme_namespace:
+ state: present
+ ostype: linux
+ path: /vol/ansible/test
+ size: 20
+ size_unit: mb
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Create NVME Namespace (Idempotency)
+ na_ontap_nvme_namespace:
+ state: present
+ ostype: linux
+ path: /vol/ansible/test
+ size: 20
+ size_unit: mb
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNVMENamespace(object):
+ """
+ Class with NVME namespace methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']),
+ path=dict(required=True, type='str'),
+ size=dict(required=False, type='int'),
+ size_unit=dict(default='b', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
+ block_size=dict(required=False, choices=[512, 4096], type='int')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[('state', 'present', ['ostype', 'size'])],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if self.parameters.get('size'):
+ self.parameters['size'] = self.parameters['size'] * \
+ netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
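+ # POW2_BYTE_MAP holds power-of-two multipliers, so for example size: 20 with
+ # size_unit: mb is converted to 20 * 1024 * 1024 bytes before being sent to ONTAP.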
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_namespace(self):
+ """
+ Get current namespace details
+ :return: ZAPI result object if the namespace exists, None otherwise
+ """
+ namespace_get = netapp_utils.zapi.NaElement('nvme-namespace-get-iter')
+ query = {
+ 'query': {
+ 'nvme-namespace-info': {
+ 'path': self.parameters['path'],
+ 'vserver': self.parameters['vserver']
+ }
+ }
+ }
+ namespace_get.translate_struct(query)
+ try:
+ result = self.server.invoke_successfully(namespace_get, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching namespace info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return result
+ return None
+
+ def create_namespace(self):
+ """
+ Create a NVME Namespace
+ """
+ options = {'path': self.parameters['path'],
+ 'ostype': self.parameters['ostype'],
+ 'size': self.parameters['size']
+ }
+ if self.parameters.get('block_size'):
+ options['block-size'] = self.parameters['block_size']
+ namespace_create = netapp_utils.zapi.NaElement('nvme-namespace-create')
+ namespace_create.translate_struct(options)
+ try:
+ self.server.invoke_successfully(namespace_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating namespace for path %s: %s'
+ % (self.parameters.get('path'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_namespace(self):
+ """
+ Delete a NVME Namespace
+ """
+ options = {'path': self.parameters['path']
+ }
+ namespace_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-namespace-delete', **options)
+ try:
+ self.server.invoke_successfully(namespace_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting namespace for path %s: %s'
+ % (self.parameters.get('path'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to NVME Namespace
+ """
+ netapp_utils.ems_log_event("na_ontap_nvme_namespace", self.server)
+ current = self.get_namespace()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_namespace()
+ elif cd_action == 'delete':
+ self.delete_namespace()
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPNVMENamespace()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py
new file mode 100644
index 00000000..5229c6e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVME subsystem
+ - Associate (modify) hosts and namespace maps for an NVMe subsystem
+ - An NVMe service must already exist on the data vserver with the NVMe protocol as a prerequisite
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme_subsystem
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified subsystem should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ subsystem:
+ description:
+ - Specifies the subsystem
+ required: true
+ type: str
+ ostype:
+ description:
+ - Specifies the ostype for initiators
+ choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v']
+ type: str
+ skip_host_check:
+ description:
+ - Skip host check
+ - Required to delete an NVMe Subsystem with attached NVMe namespaces
+ default: false
+ type: bool
+ skip_mapped_check:
+ description:
+ - Skip mapped namespace check
+ - Required to delete an NVMe Subsystem with attached NVMe namespaces
+ default: false
+ type: bool
+ hosts:
+ description:
+ - List of host NQNs (NVMe Qualification Name) associated to the controller.
+ type: list
+ elements: str
+ paths:
+ description:
+ - List of Namespace paths to be associated with the subsystem.
+ type: list
+ elements: str
+short_description: "NetApp ONTAP Manage NVME Subsystem"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVME Subsystem
+ na_ontap_nvme_subsystem:
+ state: present
+ subsystem: test_sub
+ vserver: test_dest
+ ostype: linux
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete NVME Subsystem
+ na_ontap_nvme_subsystem:
+ state: absent
+ subsystem: test_sub
+ vserver: test_dest
+ skip_host_check: True
+ skip_mapped_check: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Associate NVME Subsystem host/map
+ na_ontap_nvme_subsystem:
+ state: present
+ subsystem: "{{ subsystem }}"
+ ostype: linux
+ hosts: nqn.1992-08.com.netapp:sn.3017cfc1e2ba11e89c55005056b36338:subsystem.ansible
+ paths: /vol/ansible/test,/vol/ansible/test1
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NVME subsystem map
+ na_ontap_nvme_subsystem:
+ state: present
+ subsystem: test_sub
+ vserver: test_dest
+ skip_host_check: True
+ skip_mapped_check: True
+ paths: /vol/ansible/test
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNVMESubsystem(object):
+ """
+ Class with NVME subsystem methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ subsystem=dict(required=True, type='str'),
+ ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']),
+ skip_host_check=dict(required=False, type='bool', default=False),
+ skip_mapped_check=dict(required=False, type='bool', default=False),
+ hosts=dict(required=False, type='list', elements='str'),
+ paths=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_subsystem(self):
+ """
+ Get current subsystem details
+ :return: True if subsystem exists, None otherwise
+ """
+ subsystem_get = netapp_utils.zapi.NaElement('nvme-subsystem-get-iter')
+ query = {
+ 'query': {
+ 'nvme-subsystem-info': {
+ 'subsystem': self.parameters.get('subsystem'),
+ 'vserver': self.parameters.get('vserver')
+ }
+ }
+ }
+ subsystem_get.translate_struct(query)
+ try:
+ result = self.server.invoke_successfully(subsystem_get, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching subsystem info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ return None
+
+ def create_subsystem(self):
+ """
+ Create a NVME Subsystem
+ """
+ if self.parameters.get('ostype') is None:
+ self.module.fail_json(msg="Error: Missing required parameter 'ostype' for creating subsystem")
+ options = {'subsystem': self.parameters['subsystem'],
+ 'ostype': self.parameters['ostype']
+ }
+ subsystem_create = netapp_utils.zapi.NaElement('nvme-subsystem-create')
+ subsystem_create.translate_struct(options)
+ try:
+ self.server.invoke_successfully(subsystem_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating subsystem for %s: %s'
+ % (self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_subsystem(self):
+ """
+ Delete a NVME subsystem
+ """
+ options = {'subsystem': self.parameters['subsystem'],
+ 'skip-host-check': 'true' if self.parameters.get('skip_host_check') else 'false',
+ 'skip-mapped-check': 'true' if self.parameters.get('skip_mapped_check') else 'false',
+ }
+ subsystem_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-subsystem-delete', **options)
+ try:
+ self.server.invoke_successfully(subsystem_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting subsystem for %s: %s'
+ % (self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_subsystem_host_map(self, type):
+ """
+ Get current subsystem host details
+ :return: list if host exists, None otherwise
+ """
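+ # The subsystem's host list and namespace map are exposed through different
+ # ZAPI iterators; select the iterator, info element and field name based on
+ # whether 'hosts' or 'paths' is being queried.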
+ if type == 'hosts':
+ zapi_get, zapi_info, zapi_type = 'nvme-subsystem-host-get-iter', 'nvme-target-subsystem-host-info',\
+ 'host-nqn'
+ elif type == 'paths':
+ zapi_get, zapi_info, zapi_type = 'nvme-subsystem-map-get-iter', 'nvme-target-subsystem-map-info', 'path'
+ subsystem_get = netapp_utils.zapi.NaElement(zapi_get)
+ query = {
+ 'query': {
+ zapi_info: {
+ 'subsystem': self.parameters.get('subsystem'),
+ 'vserver': self.parameters.get('vserver')
+ }
+ }
+ }
+ subsystem_get.translate_struct(query)
+ try:
+ result = self.server.invoke_successfully(subsystem_get, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching subsystem info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attrs_list = result.get_child_by_name('attributes-list')
+ return_list = []
+ for item in attrs_list.get_children():
+ return_list.append(item[zapi_type])
+ return {type: return_list}
+ return None
+
+ def add_subsystem_host_map(self, data, type):
+ """
+ Add a NVME Subsystem host/map
+ :param: data: list of hosts/paths to be added
+ :param: type: hosts/paths
+ """
+ if type == 'hosts':
+ zapi_add, zapi_type = 'nvme-subsystem-host-add', 'host-nqn'
+ elif type == 'paths':
+ zapi_add, zapi_type = 'nvme-subsystem-map-add', 'path'
+
+ for item in data:
+ options = {'subsystem': self.parameters['subsystem'],
+ zapi_type: item
+ }
+ subsystem_add = netapp_utils.zapi.NaElement.create_node_with_children(zapi_add, **options)
+ try:
+ self.server.invoke_successfully(subsystem_add, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding %s for subsystem %s: %s'
+ % (item, self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_subsystem_host_map(self, data, type):
+ """
+ Remove a NVME Subsystem host/map
+ :param: data: list of hosts/paths to be added
+ :param: type: hosts/paths
+ """
+ if type == 'hosts':
+ zapi_remove, zapi_type = 'nvme-subsystem-host-remove', 'host-nqn'
+ elif type == 'paths':
+ zapi_remove, zapi_type = 'nvme-subsystem-map-remove', 'path'
+
+ for item in data:
+ options = {'subsystem': self.parameters['subsystem'],
+ zapi_type: item
+ }
+ subsystem_remove = netapp_utils.zapi.NaElement.create_node_with_children(zapi_remove, **options)
+ try:
+ self.server.invoke_successfully(subsystem_remove, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing %s for subsystem %s: %s'
+ % (item, self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def associate_host_map(self, types):
+ """
+ Check if there are hosts or paths to be associated with the subsystem
+ """
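+ # For each of 'hosts' and 'paths', the desired list from the task is compared
+ # with what is currently configured on the subsystem: entries only in the
+ # desired list are queued for addition, entries only on the subsystem are
+ # queued for removal.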
+ action_add_dict = {}
+ action_remove_dict = {}
+ for type in types:
+ if self.parameters.get(type):
+ current = self.get_subsystem_host_map(type)
+ if current:
+ add_items = self.na_helper.\
+ get_modified_attributes(current, self.parameters, get_list_diff=True).get(type)
+ remove_items = [item for item in current[type] if item not in self.parameters.get(type)]
+ else:
+ add_items = self.parameters[type]
+ remove_items = []
+ if add_items:
+ action_add_dict[type] = add_items
+ self.na_helper.changed = True
+ if remove_items:
+ action_remove_dict[type] = remove_items
+ self.na_helper.changed = True
+ return action_add_dict, action_remove_dict
+
+ def modify_host_map(self, add_host_map, remove_host_map):
+ for type, data in add_host_map.items():
+ self.add_subsystem_host_map(data, type)
+ for type, data in remove_host_map.items():
+ self.remove_subsystem_host_map(data, type)
+
+ def apply(self):
+ """
+ Apply action to NVME subsystem
+ """
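+ # Host and namespace-map associations are reconciled whenever the subsystem is
+ # created or already present; they are skipped when the subsystem is deleted.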
+ netapp_utils.ems_log_event("na_ontap_nvme_subsystem", self.server)
+ types = ['hosts', 'paths']
+ current = self.get_subsystem()
+ add_host_map, remove_host_map = dict(), dict()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action != 'delete' and self.parameters['state'] == 'present':
+ add_host_map, remove_host_map = self.associate_host_map(types)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_subsystem()
+ self.modify_host_map(add_host_map, remove_host_map)
+ elif cd_action == 'delete':
+ self.delete_subsystem()
+ elif cd_action is None:
+ self.modify_host_map(add_host_map, remove_host_map)
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPNVMESubsystem()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py
new file mode 100644
index 00000000..eef83911
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_object_store
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_object_store
+short_description: NetApp ONTAP manage object store config.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete object store config on ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified object store config should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the object store config to manage.
+ type: str
+
+ provider_type:
+ required: false
+ description:
+ - The name of the object store config provider.
+ type: str
+
+ server:
+ required: false
+ description:
+ - Fully qualified domain name of the object store config.
+ type: str
+
+ container:
+ required: false
+ description:
+ - Data bucket/container name used in S3 requests.
+ type: str
+
+ access_key:
+ required: false
+ description:
+ - Access key ID for AWS_S3 and SGWS provider types.
+ type: str
+
+ secret_password:
+ required: false
+ description:
+ - Secret access key for AWS_S3 and SGWS provider types.
+ type: str
+'''
+
+EXAMPLES = """
+- name: object store Create
+ na_ontap_object_store:
+ state: present
+ name: ansible
+ provider_type: SGWS
+ server: abc
+ container: abc
+ access_key: abc
+ secret_password: abc
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: object store Delete
+ na_ontap_object_store:
+ state: absent
+ name: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapObjectStoreConfig(object):
+ ''' Object initialization and class methods '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ provider_type=dict(required=False, type='str'),
+ server=dict(required=False, type='str'),
+ container=dict(required=False, type='str'),
+ access_key=dict(required=False, type='str'),
+ secret_password=dict(required=False, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # API should be used for ONTAP 9.6 or higher, Zapi for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_aggr_object_store(self):
+ """
+ Fetch details if object store config exists.
+ :return:
+ Dictionary of current details if object store config found
+ None if object store config is not found
+ """
+ if self.use_rest:
+ data = {'fields': 'uuid,name',
+ 'name': self.parameters['name']}
+ api = "cloud/targets"
+ message, error = self.rest_api.get(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message['records']) != 0:
+ return message['records'][0]
+ return None
+ else:
+ aggr_object_store_get_iter = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-object-store-config-get', **{'object-store-name': self.parameters['name']})
+ result = None
+ try:
+ result = self.server.invoke_successfully(aggr_object_store_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 15661 denotes an object store not being found.
+ if to_native(error.code) == "15661":
+ pass
+ else:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ return result
+
+ def create_aggr_object_store(self):
+ """
+ Create aggregate object store config
+ :return: None
+ """
+ required_keys = set(['provider_type', 'server', 'container', 'access_key'])
+ if not required_keys.issubset(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error provisioning object store %s: one of the following required parameters is missing: '
+ '%s' % (self.parameters['name'], ', '.join(required_keys)))
+ if self.use_rest:
+ data = {'name': self.parameters['name'],
+ 'provider_type': self.parameters['provider_type'],
+ 'server': self.parameters['server'],
+ 'container': self.parameters['container'],
+ 'access_key': self.parameters['access_key'],
+ 'owner': 'fabricpool'}
+ if self.parameters.get('secret_password'):
+ data['secret_password'] = self.parameters['secret_password']
+ api = "cloud/targets"
+ dummy, error = self.rest_api.post(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ options = {'object-store-name': self.parameters['name'],
+ 'provider-type': self.parameters['provider_type'],
+ 'server': self.parameters['server'],
+ 's3-name': self.parameters['container'],
+ 'access-key': self.parameters['access_key']}
+ if self.parameters.get('secret_password'):
+ options['secret-password'] = self.parameters['secret_password']
+ object_store_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-object-store-config-create', **options)
+
+ try:
+ self.server.invoke_successfully(object_store_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error provisioning object store config %s: %s"
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_aggr_object_store(self, uuid=None):
+ """
+ Delete aggregate object store config
+ :return: None
+ """
+ if self.use_rest:
+ api = "cloud/targets/%s" % uuid
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ object_store_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-object-store-config-delete', **{'object-store-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(object_store_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error removing object store config %s: %s" %
+ (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ """
+ Apply action to the object store config
+ :return: None
+ """
+ uuid = None
+ if not self.use_rest:
+ self.asup_log_for_cserver("na_ontap_object_store_config")
+ current = self.get_aggr_object_store()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_aggr_object_store()
+ elif cd_action == 'delete':
+ if self.use_rest:
+ uuid = current['uuid']
+ self.delete_aggr_object_store(uuid)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create Object Store Config class instance and invoke apply
+ :return: None
+ """
+ obj_store = NetAppOntapObjectStoreConfig()
+ obj_store.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py
new file mode 100644
index 00000000..f676f7c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+''' This is an Ansible module for ONTAP to manage ports for various resources.
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_ports
+short_description: NetApp ONTAP add/remove ports
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add or remove ports for broadcast domain and portset.
+
+options:
+ state:
+ description:
+ - Whether the specified port should be added or removed.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Name of the SVM.
+ - Specify this option when operating on portset.
+ type: str
+
+ names:
+ description:
+ - List of ports.
+ type: list
+ elements: str
+ required: true
+
+ resource_name:
+ description:
+ - Name of the portset or broadcast domain.
+ type: str
+ required: true
+
+ resource_type:
+ description:
+ - Type of the resource to add a port to or remove a port from.
+ choices: ['broadcast_domain', 'portset']
+ required: true
+ type: str
+
+ ipspace:
+ description:
+ - Specify the required ipspace for the broadcast domain.
+ - A domain ipspace cannot be modified after the domain has been created.
+ type: str
+
+ portset_type:
+ description:
+ - Protocols accepted for portset.
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+
+'''
+
+EXAMPLES = '''
+
+ - name: broadcast domain remove port
+ tags:
+ - remove
+ na_ontap_ports:
+ state: absent
+ names: test-vsim1:e0d-1,test-vsim1:e0d-2
+ resource_type: broadcast_domain
+ resource_name: ansible_domain
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: broadcast domain add port
+ tags:
+ - add
+ na_ontap_ports:
+ state: present
+ names: test-vsim1:e0d-1,test-vsim1:e0d-2
+ resource_type: broadcast_domain
+ resource_name: ansible_domain
+ ipspace: Default
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: portset remove port
+ tags:
+ - remove
+ na_ontap_ports:
+ state: absent
+ names: lif_2
+ resource_type: portset
+ resource_name: portset_1
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: portset add port
+ tags:
+ - add
+ na_ontap_ports:
+ state: present
+ names: lif_2
+ resource_type: portset
+ resource_name: portset_1
+ portset_type: iscsi
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapPorts(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=False, type='str'),
+ names=dict(required=True, type='list', elements='str'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str', choices=['broadcast_domain', 'portset']),
+ ipspace=dict(required=False, type='str'),
+ portset_type=dict(required=False, type='str', choices=['fcp', 'iscsi', 'mixed']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('resource_type', 'portset', ['vserver']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ if self.parameters['resource_type'] == 'broadcast_domain':
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ elif self.parameters['resource_type'] == 'portset':
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def add_broadcast_domain_ports(self, ports):
+ """
+ Add broadcast domain ports
+ :param ports: ports to be added.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port for broadcast domain %s: %s' %
+ (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_broadcast_domain_ports(self, ports):
+ """
+ Deletes broadcast domain ports
+ :param ports: ports to be removed.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing port for broadcast domain %s: %s' %
+ (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_broadcast_domain_ports(self):
+ """
+ Return details about the broadcast domain ports.
+ :return: Details about the broadcast domain ports. [] if not found.
+ :rtype: list
+ """
+ domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+ broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+ broadcast_domain_info.add_new_child('broadcast-domain', self.parameters['resource_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(broadcast_domain_info)
+ domain_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(domain_get_iter, True)
+ ports = []
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+ domain_ports = domain_info.get_child_by_name('ports')
+ if domain_ports is not None:
+ ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+ return ports
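+ # Illustrative note (not part of the module): for a domain with two ports the
+ # list returned above would look like ['test-vsim1:e0d-1', 'test-vsim1:e0d-2'];
+ # an empty list means the domain was not found or has no ports.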
+
+ def remove_portset_ports(self, port):
+ """
+ Remove a single port from the portset
+ :return: None
+ """
+ options = {'portset-name': self.parameters['resource_name'],
+ 'portset-port-name': port.strip()}
+
+ portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-remove', **options)
+
+ try:
+ self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing port in portset %s: %s' %
+ (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc())
+
+ def add_portset_ports(self, port):
+ """
+ Add a single port to the portset
+ :return: None
+ """
+ options = {'portset-name': self.parameters['resource_name'],
+ 'portset-port-name': port.strip()}
+
+ portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-add', **options)
+
+ try:
+ self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port in portset %s: %s' %
+ (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc())
+
+ def portset_get_iter(self):
+ """
+ Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters
+ :return: NaElement object for portset-get-iter with query
+ """
+ portset_get = netapp_utils.zapi.NaElement('portset-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ portset_info = netapp_utils.zapi.NaElement('portset-info')
+ portset_info.add_new_child('vserver', self.parameters['vserver'])
+ portset_info.add_new_child('portset-name', self.parameters['resource_name'])
+ if self.parameters.get('portset_type'):
+ portset_info.add_new_child('portset-type', self.parameters['portset_type'])
+ query.add_child_elem(portset_info)
+ portset_get.add_child_elem(query)
+ return portset_get
+
+ def portset_get(self):
+ """
+ Get current portset info
+ :return: List of current ports if query successful, else return []
+ """
+ portset_get_iter = self.portset_get_iter()
+ result, ports = None, []
+ try:
+ result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching portset %s: %s'
+ % (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+ # return portset details
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info')
+ if int(portset_get_info.get_child_content('portset-port-total')) > 0:
+ port_info = portset_get_info.get_child_by_name('portset-port-info')
+ ports = [port.get_content() for port in port_info.get_children()]
+ return ports
+
+ def modify_broadcast_domain_ports(self):
+ """
+ Compare current and desired ports. Call the add or remove ports methods if needed.
+ :return: None.
+ """
+ current_ports = self.get_broadcast_domain_ports()
+ cd_ports = self.parameters['names']
+ if self.parameters['state'] == 'present':
+ ports_to_add = [port for port in cd_ports if port not in current_ports]
+ if len(ports_to_add) > 0:
+ if not self.module.check_mode:
+ self.add_broadcast_domain_ports(ports_to_add)
+ self.na_helper.changed = True
+
+ if self.parameters['state'] == 'absent':
+ ports_to_remove = [port for port in cd_ports if port in current_ports]
+ if len(ports_to_remove) > 0:
+ if not self.module.check_mode:
+ self.remove_broadcast_domain_ports(ports_to_remove)
+ self.na_helper.changed = True
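+ # Illustrative example (not part of the module): with current_ports
+ # ['test-vsim1:e0d-1', 'test-vsim1:e0d-2'] and names ['test-vsim1:e0d-2',
+ # 'test-vsim1:e0d-3'], state=present adds only 'test-vsim1:e0d-3' and
+ # state=absent removes only 'test-vsim1:e0d-2', keeping the module idempotent.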
+
+ def modify_portset_ports(self):
+ current_ports = self.portset_get()
+ cd_ports = self.parameters['names']
+ if self.parameters['state'] == 'present':
+ ports_to_add = [port for port in cd_ports if port not in current_ports]
+ if len(ports_to_add) > 0:
+ if not self.module.check_mode:
+ for port in ports_to_add:
+ self.add_portset_ports(port)
+ self.na_helper.changed = True
+
+ if self.parameters['state'] == 'absent':
+ ports_to_remove = [port for port in cd_ports if port in current_ports]
+ if len(ports_to_remove) > 0:
+ if not self.module.check_mode:
+ for port in ports_to_remove:
+ self.remove_portset_ports(port)
+ self.na_helper.changed = True
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_ports")
+ if self.parameters['resource_type'] == 'broadcast_domain':
+ self.modify_broadcast_domain_ports()
+ elif self.parameters['resource_type'] == 'portset':
+ self.modify_portset_ports()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ portset_obj = NetAppOntapPorts()
+ portset_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py
new file mode 100644
index 00000000..e2511f17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP Create/Delete portset
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete ONTAP portset, modify ports in a portset.
+ - Modifying the type (protocol) is not supported by ONTAP.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_portset
+options:
+ state:
+ description:
+ - Whether the specified portset should exist or not.
+ default: present
+ type: str
+ vserver:
+ required: true
+ description:
+ - Name of the SVM.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the port set to create.
+ type: str
+ type:
+ description:
+ - Required for create.
+ - Protocols accepted for this portset.
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+ force:
+ description:
+ - If 'false' or not specified, the request will fail if there are any igroups bound to this portset.
+ - If 'true', forcibly destroy the portset, even if there are existing igroup bindings.
+ type: bool
+ default: False
+ ports:
+ description:
+ - Specify the ports associated with this portset. Should be comma separated.
+ - It represents the expected state of a list of ports at any time, and replaces the current value of ports.
+ - Adds a port if it is specified in expected state but not in current state.
+ - Deletes a port if it is in current state but not in expected state.
+ type: list
+ elements: str
+version_added: 2.8.0
+
+'''
+
+EXAMPLES = """
+ - name: Create Portset
+ na_ontap_portset:
+ state: present
+ vserver: vserver_name
+ name: portset_name
+ ports: a1
+ type: "{{ protocol type }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+
+ - name: Modify ports in portset
+ na_ontap_portset:
+ state: present
+ vserver: vserver_name
+ name: portset_name
+ ports: a1,a2
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+
+ - name: Delete Portset
+ na_ontap_portset:
+ state: absent
+ vserver: vserver_name
+ name: portset_name
+ force: True
+ type: "{{ protocol type }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPPortset(object):
+ """
+ Methods to create or delete portset
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present'),
+ vserver=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ type=dict(required=False, type='str', choices=[
+ 'fcp', 'iscsi', 'mixed']),
+ force=dict(required=False, type='bool', default=False),
+ ports=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def portset_get_iter(self):
+ """
+ Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters
+ :return: NaElement object for portset-get-iter with query
+ """
+ portset_get = netapp_utils.zapi.NaElement('portset-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ portset_info = netapp_utils.zapi.NaElement('portset-info')
+ portset_info.add_new_child('vserver', self.parameters['vserver'])
+ portset_info.add_new_child('portset-name', self.parameters['name'])
+ query.add_child_elem(portset_info)
+ portset_get.add_child_elem(query)
+ return portset_get
+
+ def portset_get(self):
+ """
+ Get current portset info
+ :return: Dictionary of current portset details if query successful, else return None
+ """
+ portset_get_iter = self.portset_get_iter()
+ result, portset_info = None, dict()
+ try:
+ result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching portset %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ # return portset details
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info')
+ portset_info['type'] = portset_get_info.get_child_content('portset-type')
+ if int(portset_get_info.get_child_content('portset-port-total')) > 0:
+ ports = portset_get_info.get_child_by_name('portset-port-info')
+ portset_info['ports'] = [port.get_content() for port in ports.get_children()]
+ else:
+ portset_info['ports'] = []
+ return portset_info
+ return None
+
+ def create_portset(self):
+ """
+ Create a portset
+ """
+ if self.parameters.get('type') is None:
+ self.module.fail_json(msg='Error: Missing required parameter for create (type)')
+ portset_info = netapp_utils.zapi.NaElement("portset-create")
+ portset_info.add_new_child("portset-name", self.parameters['name'])
+ portset_info.add_new_child("portset-type", self.parameters['type'])
+ try:
+ self.server.invoke_successfully(
+ portset_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating portset %s: %s" %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_portset(self):
+ """
+ Delete a portset
+ """
+ portset_info = netapp_utils.zapi.NaElement("portset-destroy")
+ portset_info.add_new_child("portset-name", self.parameters['name'])
+ if self.parameters.get('force'):
+ portset_info.add_new_child("force", str(self.parameters['force']))
+ try:
+ self.server.invoke_successfully(
+ portset_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error deleting portset %s: %s" %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_ports(self, ports):
+ """
+ Remove the given ports from the portset
+ :return: None
+ """
+ for port in ports:
+ self.modify_port(port, 'portset-remove', 'removing')
+
+ def add_ports(self):
+ """
+ Add the list of ports to portset
+ :return: None
+ """
+ # don't add if ports is empty string
+ if self.parameters.get('ports') == [''] or self.parameters.get('ports') is None:
+ return
+ for port in self.parameters['ports']:
+ self.modify_port(port, 'portset-add', 'adding')
+
+ def modify_port(self, port, zapi, action):
+ """
+ Add or remove a port to/from a portset
+ """
+ port = port.strip() # remove leading/trailing spaces if any (eg: if the user types a space after a comma in the ports list)
+ options = {'portset-name': self.parameters['name'],
+ 'portset-port-name': port}
+
+ portset_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+ try:
+ self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error %s port in portset %s: %s' % (action, self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
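+ # Illustrative usage (not part of the module): add_ports() calls
+ # self.modify_port('e0c', 'portset-add', 'adding') for each desired port
+ # (port name hypothetical), and remove_ports() calls
+ # self.modify_port(port, 'portset-remove', 'removing') for each port
+ # currently in the portset.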
+
+ def apply(self):
+ """
+ Applies action from playbook
+ """
+ netapp_utils.ems_log_event("na_ontap_autosupport", self.server)
+ current, modify = self.portset_get(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ if self.parameters.get('type') and self.parameters['type'] != current['type']:
+ self.module.fail_json(msg="modify protocol(type) not supported and %s already exists in vserver %s under different type" %
+ (self.parameters['name'], self.parameters['vserver']))
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_portset()
+ self.add_ports()
+ elif cd_action == 'delete':
+ self.delete_portset()
+ elif modify:
+ self.remove_ports(current['ports'])
+ self.add_ports()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ portset_obj = NetAppONTAPPortset()
+ portset_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py
new file mode 100644
index 00000000..28b5a773
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_qos_adaptive_policy_group
+short_description: NetApp ONTAP Adaptive Quality of Service policy group.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@joshedmonds) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, modify, or rename an Adaptive QoS policy group on NetApp ONTAP. Module is based on the standard QoS policy group module.
+
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified policy group should exist or not.
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the policy group to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the existing policy group to be renamed to name.
+ type: str
+
+ absolute_min_iops:
+ description:
+ - Absolute minimum IOPS defined by this policy.
+ type: str
+
+ expected_iops:
+ description:
+ - Minimum expected IOPS defined by this policy.
+ type: str
+
+ peak_iops:
+ description:
+ - Maximum possible IOPS per allocated or used TB|GB.
+ type: str
+
+ peak_iops_allocation:
+ choices: ['allocated_space', 'used_space']
+ description:
+ - Whether peak_iops is specified by allocated or used space.
+ default: 'used_space'
+ type: str
+
+ force:
+ type: bool
+ default: False
+ description:
+ - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
+'''
+
+EXAMPLES = """
+ - name: create adaptive qos policy group
+ na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 100IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: allocated_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify adaptive qos policy group expected iops
+ na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 125IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: allocated_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify adaptive qos policy group peak iops allocation
+ na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 125IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: used_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: delete qos policy group
+ na_ontap_qos_adaptive_policy_group:
+ state: absent
+ name: aq_policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapAdaptiveQosPolicyGroup(object):
+ """
+ Create, delete, modify and rename a policy group.
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap qos policy group class.
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ absolute_min_iops=dict(required=False, type='str'),
+ expected_iops=dict(required=False, type='str'),
+ peak_iops=dict(required=False, type='str'),
+ peak_iops_allocation=dict(choices=['allocated_space', 'used_space'], default='used_space'),
+ force=dict(required=False, type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module)
+
+ def get_policy_group(self, policy_group_name=None):
+ """
+ Return details of a policy group.
+ :param policy_group_name: policy group name
+ :return: policy group details.
+ :rtype: dict.
+ """
+ if policy_group_name is None:
+ policy_group_name = self.parameters['name']
+ policy_group_get_iter = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-get-iter')
+ policy_group_info = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-info')
+ policy_group_info.add_new_child('policy-group', policy_group_name)
+ policy_group_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(policy_group_info)
+ policy_group_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(policy_group_get_iter, True)
+ policy_group_detail = None
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-adaptive-policy-group-info')
+
+ policy_group_detail = {
+ 'name': policy_info.get_child_content('policy-group'),
+ 'vserver': policy_info.get_child_content('vserver'),
+ 'absolute_min_iops': policy_info.get_child_content('absolute-min-iops'),
+ 'expected_iops': policy_info.get_child_content('expected-iops'),
+ 'peak_iops': policy_info.get_child_content('peak-iops'),
+ 'peak_iops_allocation': policy_info.get_child_content('peak-iops-allocation')
+ }
+ return policy_group_detail
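+ # Illustrative note (not part of the module): a matching policy group is
+ # returned as a dict such as
+ # {'name': 'aq_policy_1', 'vserver': 'policy_vserver',
+ # 'absolute_min_iops': '70IOPS', 'expected_iops': '100IOPS/TB',
+ # 'peak_iops': '250IOPS/TB', 'peak_iops_allocation': 'allocated_space'};
+ # None is returned when no record matches.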
+
+ def create_policy_group(self):
+ """
+ create a policy group name.
+ """
+ policy_group = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-create')
+ policy_group.add_new_child('policy-group', self.parameters['name'])
+ policy_group.add_new_child('vserver', self.parameters['vserver'])
+ if self.parameters.get('absolute_min_iops'):
+ policy_group.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
+ if self.parameters.get('expected_iops'):
+ policy_group.add_new_child('expected-iops', self.parameters['expected_iops'])
+ if self.parameters.get('peak_iops'):
+ policy_group.add_new_child('peak-iops', self.parameters['peak_iops'])
+ if self.parameters.get('peak_iops_allocation'):
+ policy_group.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
+ try:
+ self.server.invoke_successfully(policy_group, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating adaptive qos policy group %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_policy_group(self, policy_group=None):
+ """
+ delete an existing policy group.
+ :param policy_group: policy group name.
+ """
+ if policy_group is None:
+ policy_group = self.parameters['name']
+ policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-delete')
+ policy_group_obj.add_new_child('policy-group', policy_group)
+ if self.parameters.get('force'):
+ policy_group_obj.add_new_child('force', str(self.parameters['force']))
+ try:
+ self.server.invoke_successfully(policy_group_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting adaptive qos policy group %s: %s' %
+ (policy_group, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_policy_group(self):
+ """
+ Modify policy group.
+ """
+ policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-modify')
+ policy_group_obj.add_new_child('policy-group', self.parameters['name'])
+ if self.parameters.get('absolute_min_iops'):
+ policy_group_obj.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
+ if self.parameters.get('expected_iops'):
+ policy_group_obj.add_new_child('expected-iops', self.parameters['expected_iops'])
+ if self.parameters.get('peak_iops'):
+ policy_group_obj.add_new_child('peak-iops', self.parameters['peak_iops'])
+ if self.parameters.get('peak_iops_allocation'):
+ policy_group_obj.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
+ try:
+ self.server.invoke_successfully(policy_group_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying adaptive qos policy group %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_policy_group(self):
+ """
+ Rename policy group name.
+ """
+ rename_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-rename')
+ rename_obj.add_new_child('new-name', self.parameters['name'])
+ rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
+ try:
+ self.server.invoke_successfully(rename_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming adaptive qos policy group %s: %s' %
+ (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_helper(self, modify):
+ """
+ helper method to modify policy group.
+ :param modify: modified attributes.
+ """
+ if any(attribute in modify for attribute in ['absolute_min_iops', 'expected_iops', 'peak_iops', 'peak_iops_allocation']):
+ self.modify_policy_group()
+
+ def apply(self):
+ """
+ Run module based on playbook
+ """
+ self.autosupport_log("na_ontap_qos_policy_group")
+ current = self.get_policy_group()
+ rename, cd_action = None, None
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_policy_group(self.parameters['from_name']), current)
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_policy_group()
+ if cd_action == 'create':
+ self.create_policy_group()
+ elif cd_action == 'delete':
+ self.delete_policy_group()
+ elif modify:
+ self.modify_helper(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def autosupport_log(self, event_name):
+ """
+ Create a log event against the provided vserver
+ """
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ netapp_utils.ems_log_event(event_name, server)
+
+
+def main():
+ '''Apply vserver operations from playbook'''
+ qos_policy_group = NetAppOntapAdaptiveQosPolicyGroup()
+ qos_policy_group.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py
new file mode 100644
index 00000000..a74bc987
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_qos_policy_group
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_qos_policy_group
+short_description: NetApp ONTAP manage policy group in Quality of Service.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, modify, or rename QoS policy group on NetApp ONTAP.
+
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified policy group should exist or not.
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the policy group to manage.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the existing policy group to be renamed to name.
+ type: str
+
+ max_throughput:
+ description:
+ - Maximum throughput defined by this policy.
+ type: str
+
+ min_throughput:
+ description:
+ - Minimum throughput defined by this policy.
+ type: str
+
+ is_shared:
+ description:
+ - Whether the SLOs of the policy group are shared between the workloads or applied separately to each workload.
+ type: bool
+ version_added: 20.12.0
+
+ force:
+ type: bool
+ default: False
+ description:
+ - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
+'''
+
+EXAMPLES = """
+ - name: create qos policy group
+ na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ max_throughput: 800KB/s,800iops
+ min_throughput: 100iops
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify qos policy group max throughput
+ na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ max_throughput: 900KB/s,800iops
+ min_throughput: 100iops
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: delete qos policy group
+ na_ontap_qos_policy_group:
+ state: absent
+ name: policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapQosPolicyGroup(object):
+ """
+ Create, delete, modify and rename a policy group.
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap qos policy group class.
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ max_throughput=dict(required=False, type='str'),
+ min_throughput=dict(required=False, type='str'),
+ is_shared=dict(required=False, type='bool'),
+ force=dict(required=False, type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module)
+
+ def get_policy_group(self, policy_group_name=None):
+ """
+ Return details of a policy group.
+ :param policy_group_name: policy group name
+ :return: policy group details.
+ :rtype: dict.
+ """
+ if policy_group_name is None:
+ policy_group_name = self.parameters['name']
+ policy_group_get_iter = netapp_utils.zapi.NaElement('qos-policy-group-get-iter')
+ policy_group_info = netapp_utils.zapi.NaElement('qos-policy-group-info')
+ policy_group_info.add_new_child('policy-group', policy_group_name)
+ policy_group_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(policy_group_info)
+ policy_group_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(policy_group_get_iter, True)
+ policy_group_detail = None
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-policy-group-info')
+
+ policy_group_detail = {
+ 'name': policy_info.get_child_content('policy-group'),
+ 'vserver': policy_info.get_child_content('vserver'),
+ 'max_throughput': policy_info.get_child_content('max-throughput'),
+ 'min_throughput': policy_info.get_child_content('min-throughput'),
+ 'is_shared': self.na_helper.get_value_for_bool(True, policy_info.get_child_content('is-shared'))
+ }
+ return policy_group_detail
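+ # Illustrative note (not part of the module): a matching policy group is
+ # returned as a dict such as
+ # {'name': 'policy_1', 'vserver': 'policy_vserver',
+ # 'max_throughput': '800KB/s,800iops', 'min_throughput': '100iops',
+ # 'is_shared': True};
+ # None is returned when no record matches.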
+
+ def create_policy_group(self):
+ """
+ create a policy group name.
+ """
+ policy_group = netapp_utils.zapi.NaElement('qos-policy-group-create')
+ policy_group.add_new_child('policy-group', self.parameters['name'])
+ policy_group.add_new_child('vserver', self.parameters['vserver'])
+ if self.parameters.get('max_throughput'):
+ policy_group.add_new_child('max-throughput', self.parameters['max_throughput'])
+ if self.parameters.get('min_throughput'):
+ policy_group.add_new_child('min-throughput', self.parameters['min_throughput'])
+ if self.parameters.get('is_shared') is not None:
+ policy_group.add_new_child('is-shared', self.na_helper.get_value_for_bool(False, self.parameters['is_shared']))
+ try:
+ self.server.invoke_successfully(policy_group, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating qos policy group %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_policy_group(self, policy_group=None):
+ """
+ delete an existing policy group.
+ :param policy_group: policy group name.
+ """
+ if policy_group is None:
+ policy_group = self.parameters['name']
+ policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-delete')
+ policy_group_obj.add_new_child('policy-group', policy_group)
+ if self.parameters.get('force'):
+ policy_group_obj.add_new_child('force', str(self.parameters['force']))
+ try:
+ self.server.invoke_successfully(policy_group_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting qos policy group %s: %s' %
+ (policy_group, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_policy_group(self):
+ """
+ Modify policy group.
+ """
+ policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-modify')
+ policy_group_obj.add_new_child('policy-group', self.parameters['name'])
+ if self.parameters.get('max_throughput'):
+ policy_group_obj.add_new_child('max-throughput', self.parameters['max_throughput'])
+ if self.parameters.get('min_throughput'):
+ policy_group_obj.add_new_child('min-throughput', self.parameters['min_throughput'])
+ try:
+ self.server.invoke_successfully(policy_group_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying qos policy group %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_policy_group(self):
+ """
+ Rename policy group name.
+ """
+ rename_obj = netapp_utils.zapi.NaElement('qos-policy-group-rename')
+ rename_obj.add_new_child('new-name', self.parameters['name'])
+ rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
+ try:
+ self.server.invoke_successfully(rename_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming qos policy group %s: %s' %
+ (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_helper(self, modify):
+ """
+ helper method to modify policy group.
+ :param modify: modified attributes.
+ """
+ if 'is_shared' in modify:
+ self.module.fail_json(msg='Error: cannot modify the is_shared attribute.')
+ if any([attribute in modify for attribute in ['max_throughput', 'min_throughput']]):
+ self.modify_policy_group()
+
+ def apply(self):
+ """
+ Run module based on playbook
+ """
+ self.asup_log_for_cserver("na_ontap_qos_policy_group")
+ current = self.get_policy_group()
+ rename, cd_action = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name'):
+ # create policy by renaming an existing one
+ old_policy = self.get_policy_group(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(old_policy, current)
+ if rename:
+ current = old_policy
+ cd_action = None
+ if rename is None:
+ self.module.fail_json(msg='Error renaming qos policy group: cannot find %s' %
+ self.parameters['from_name'])
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ self.rename_policy_group()
+ if cd_action == 'create':
+ self.create_policy_group()
+ elif cd_action == 'delete':
+ self.delete_policy_group()
+ elif modify:
+ self.modify_helper(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ '''Apply vserver operations from playbook'''
+ qos_policy_group = NetAppOntapQosPolicyGroup()
+ qos_policy_group.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py
new file mode 100644
index 00000000..9d05d75b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_qtree
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_qtree
+
+short_description: NetApp ONTAP manage qtrees
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified qtree should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the qtree to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the qtree to be renamed.
+ version_added: 2.7.0
+ type: str
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the qtree should exist on.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ export_policy:
+ description:
+ - The name of the export policy to apply.
+ version_added: 2.9.0
+ type: str
+
+ security_style:
+ description:
+ - The security style for the qtree.
+ choices: ['unix', 'ntfs', 'mixed']
+ type: str
+ version_added: 2.9.0
+
+ oplocks:
+ description:
+ - Whether the oplocks should be enabled or not for the qtree.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+
+ unix_permissions:
+ description:
+ - File permissions bits of the qtree.
+ version_added: 2.9.0
+ type: str
+
+ force_delete:
+ description:
+ - Whether the qtree should be deleted even if files still exist.
+ - Note that the default of true reflects the REST API behavior.
+ - A value of false is not supported with REST.
+ type: bool
+ default: true
+ version_added: 20.8.0
+
+ wait_for_completion:
+ description:
+ - Only applicable for REST. When using ZAPI, the deletion is always synchronous.
+ - Deleting a qtree may take time if many files need to be deleted.
+ - Set this parameter to 'true' for synchronous execution during delete.
+ - Set this parameter to 'false' for asynchronous execution.
+ - For asynchronous execution, the module returns as soon as the request is sent, and the qtree is deleted in the background.
+ type: bool
+ default: true
+ version_added: 2.9.0
+
+ time_out:
+ description:
+ - Maximum time to wait for qtree deletion in seconds when wait_for_completion is True.
+ - The module fails if the task is not completed within the defined time.
+ - Default is set to 3 minutes.
+ default: 180
+ type: int
+ version_added: 2.9.0
+'''
+
+EXAMPLES = """
+- name: Create Qtrees
+ na_ontap_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ export_policy: policyName
+ security_style: mixed
+ oplocks: disabled
+ unix_permissions:
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename Qtrees
+ na_ontap_qtree:
+ state: present
+ from_name: ansibleQTree_rename
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import datetime
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapQTree(object):
+ '''Class with qtree operations'''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ flexvol_name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ export_policy=dict(required=False, type='str'),
+ security_style=dict(required=False, type='str', choices=['unix', 'ntfs', 'mixed']),
+ oplocks=dict(required=False, type='str', choices=['enabled', 'disabled']),
+ unix_permissions=dict(required=False, type='str'),
+ force_delete=dict(required=False, type='bool', default=True),
+ wait_for_completion=dict(required=False, type='bool', default=True),
+ time_out=dict(required=False, type='int', default=180),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name'])
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+
+ def get_qtree(self, name=None):
+ """
+ Check whether the qtree exists.
+ :param name: qtree name
+ :return:
+ Details about the qtree as a dict
+ None if the qtree is not found
+ :rtype: dict or None
+ """
+ if name is None:
+ name = self.parameters['name']
+ if self.use_rest:
+ api = "storage/qtrees"
+ query = {'fields': 'export_policy,unix_permissions,security_style,volume',
+ 'svm.name': self.parameters['vserver'],
+ 'volume': self.parameters['flexvol_name'],
+ 'name': name}
+ message, error = self.rest_api.get(api, query)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ return None
+ elif 'records' in message and len(message['records']) == 0:
+ return None
+ elif 'records' not in message:
+ error = "Unexpected response in get_qtree from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ return message['records'][0]
+ else:
+ qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-info', **{'vserver': self.parameters['vserver'],
+ 'volume': self.parameters['flexvol_name'],
+ 'qtree': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ qtree_list_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(qtree_list_iter,
+ enable_tunneling=True)
+ return_q = None
+ if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
+ return_q = {'export_policy': result['attributes-list']['qtree-info']['export-policy'],
+ 'oplocks': result['attributes-list']['qtree-info']['oplocks'],
+ 'security_style': result['attributes-list']['qtree-info']['security-style']}
+
+ if result['attributes-list']['qtree-info'].get_child_by_name('mode'):
+ return_q['unix_permissions'] = result['attributes-list']['qtree-info']['mode']
+ else:
+ return_q['unix_permissions'] = ''
+
+ return return_q
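+ # Illustrative note (not part of the module): the ZAPI branch above returns a
+ # dict such as {'export_policy': 'policyName', 'oplocks': 'disabled',
+ # 'security_style': 'mixed', 'unix_permissions': '755'} (values hypothetical),
+ # while the REST branch returns the raw record from storage/qtrees;
+ # None means the qtree does not exist.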
+
+ def create_qtree(self):
+ """
+ Create a qtree
+ """
+ if self.use_rest:
+ api = "storage/qtrees"
+ body = {'name': self.parameters['name'], 'volume': {'name': self.parameters['flexvol_name']},
+ 'svm': {'name': self.parameters['vserver']}}
+ if self.parameters.get('export_policy'):
+ body['export_policy'] = self.parameters['export_policy']
+ if self.parameters.get('security_style'):
+ body['security_style'] = self.parameters['security_style']
+ if self.parameters.get('unix_permissions'):
+ body['unix_permissions'] = self.parameters['unix_permissions']
+ __, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ options = {'qtree': self.parameters['name'], 'volume': self.parameters['flexvol_name']}
+ if self.parameters.get('export_policy'):
+ options['export-policy'] = self.parameters['export_policy']
+ if self.parameters.get('security_style'):
+ options['security-style'] = self.parameters['security_style']
+ if self.parameters.get('oplocks'):
+ options['oplocks'] = self.parameters['oplocks']
+ if self.parameters.get('unix_permissions'):
+ options['mode'] = self.parameters['unix_permissions']
+ qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-create', **options)
+ try:
+ self.server.invoke_successfully(qtree_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error provisioning qtree %s: %s"
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_qtree(self, current):
+ """
+ Delete a qtree
+ """
+ if self.use_rest:
+ uuid = current['volume']['uuid']
+ qid = str(current['id'])
+ api = "storage/qtrees/%s/%s" % (uuid, qid)
+ query = {'return_timeout': 3}
+ response, error = self.rest_api.delete(api, params=query)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'job' in response and self.parameters['wait_for_completion']:
+ message, error = self.rest_api.wait_on_job(response['job'], timeout=self.parameters['time_out'], increment=10)
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ else:
+ path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
+ options = {'qtree': path}
+ if self.parameters['force_delete']:
+ options['force'] = "true"
+ qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-delete', **options)
+
+ try:
+ self.server.invoke_successfully(qtree_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_qtree(self, current):
+ """
+ Rename a qtree
+ """
+ if self.use_rest:
+ body = {'name': self.parameters['name']}
+ uuid = current['volume']['uuid']
+ qid = str(current['id'])
+ api = "storage/qtrees/%s/%s" % (uuid, qid)
+ dummy, error = self.rest_api.patch(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
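+            # ZAPI rename takes full /vol/<volume>/<qtree> paths for both the old and the new name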
+ path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['from_name'])
+ new_path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
+ qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-rename', **{'qtree': path,
+ 'new-qtree-name': new_path})
+
+ try:
+ self.server.invoke_successfully(qtree_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error renaming qtree %s: %s"
+ % (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_qtree(self, current):
+ """
+ Modify a qtree
+ """
+ if self.use_rest:
+ now = datetime.datetime.now()
+ body = {}
+ if self.parameters.get('security_style'):
+ body['security_style'] = self.parameters['security_style']
+ if self.parameters.get('unix_permissions'):
+ body['unix_permissions'] = self.parameters['unix_permissions']
+ if self.parameters.get('export_policy'):
+ body['export_policy'] = {'name': self.parameters['export_policy']}
+ uuid = current['volume']['uuid']
+ qid = str(current['id'])
+ api = "storage/qtrees/%s/%s" % (uuid, qid)
+ timeout = 120
+ query = {'return_timeout': timeout}
+ dummy, error = self.rest_api.patch(api, body, query)
+
+ later = datetime.datetime.now()
+ time_elapsed = later - now
+            # with return_timeout left at 0 the modify does not report errors, so default it to 120 seconds and fail if the call takes longer
+ if time_elapsed.seconds > (timeout - 1):
+ self.module.fail_json(msg="Too long to run")
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ options = {'qtree': self.parameters['name'], 'volume': self.parameters['flexvol_name']}
+ if self.parameters.get('export_policy'):
+ options['export-policy'] = self.parameters['export_policy']
+ if self.parameters.get('security_style'):
+ options['security-style'] = self.parameters['security_style']
+ if self.parameters.get('oplocks'):
+ options['oplocks'] = self.parameters['oplocks']
+ if self.parameters.get('unix_permissions'):
+ options['mode'] = self.parameters['unix_permissions']
+ qtree_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-modify', **options)
+ try:
+ self.server.invoke_successfully(qtree_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying qtree %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Call create/delete/modify/rename operations'''
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_qtree", self.server)
+ current = self.get_qtree()
+ rename, cd_action, modify = None, None, None
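+        # from_name indicates a rename request; otherwise determine whether the qtree must be created or deleted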
+ if self.parameters.get('from_name'):
+ from_qtree = self.get_qtree(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(from_qtree, current)
+ if rename is None:
+ self.module.fail_json(msg='Error renaming: qtree %s does not exist' % self.parameters['from_name'])
+ if rename:
+ current = from_qtree
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
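+            # the qtree exists (possibly under from_name) and should remain; compare each modifiable attribute against the current values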
+ if self.parameters.get('security_style') and self.parameters['security_style'] != current['security_style']:
+ modify = True
+ if self.parameters.get('unix_permissions') and \
+ self.parameters['unix_permissions'] != str(current['unix_permissions']):
+ modify = True
+ # rest and zapi handle export policy differently
+ if self.use_rest:
+ if self.parameters.get('export_policy') and \
+ self.parameters['export_policy'] != current['export_policy']['name']:
+ modify = True
+ else:
+ if self.parameters.get('export_policy') and \
+ self.parameters['export_policy'] != current['export_policy']:
+ modify = True
+ if self.use_rest and cd_action == 'delete' and not self.parameters['force_delete']:
+ self.module.fail_json(msg='Error: force_delete option is not supported for REST, unless set to true.')
+
+ if modify:
+ self.na_helper.changed = True
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_qtree()
+ elif cd_action == 'delete':
+ self.delete_qtree(current)
+ else:
+ if rename:
+ self.rename_qtree(current)
+ if modify:
+ self.modify_qtree(current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Apply qtree operations from playbook'''
+ qtree_obj = NetAppOntapQTree()
+ qtree_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
new file mode 100644
index 00000000..527d8dff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_quota_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_quota_policy
+short_description: NetApp ONTAP create, assign, rename or delete quota policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '19.11.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, assign, rename or delete the quota policy
+options:
+ state:
+ description:
+ - Whether the specified quota policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the quota policy.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies the quota policy name to create or rename to.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the existing quota policy to be renamed to name.
+ type: str
+
+ auto_assign:
+ description:
+      - When true, assign the policy to the vserver, whether it is newly created, renamed, or already exists.
+      - When true, the policy identified by name replaces the already assigned policy.
+      - When false, the policy is created if it does not already exist but is not assigned.
+ type: bool
+ default: true
+ version_added: 20.12.0
+"""
+
+EXAMPLES = """
+ - name: Create quota policy
+ na_ontap_quota_policy:
+ state: present
+ vserver: SVM1
+ name: ansible_policy
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Rename quota policy
+ na_ontap_quota_policy:
+ state: present
+ vserver: SVM1
+ name: new_ansible
+ from_name: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete quota policy
+ na_ontap_quota_policy:
+ state: absent
+ vserver: SVM1
+ name: ansible_policy
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.netapp.ontap.plugins.module_utils.zapis_svm as zapis_svm
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapQuotaPolicy(object):
+ """
+ Create, assign, rename or delete a quota policy
+ """
+
+ def __init__(self):
+ """
+ Initialize the ONTAP quota policy class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ auto_assign=dict(required=False, type='bool', default=True),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['name', 'vserver'])
+ ],
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_quota_policy(self, policy_name=None):
+
+ if policy_name is None:
+ policy_name = self.parameters['name']
+
+ return_value = None
+ quota_policy_get_iter = netapp_utils.zapi.NaElement('quota-policy-get-iter')
+ quota_policy_info = netapp_utils.zapi.NaElement('quota-policy-info')
+ quota_policy_info.add_new_child('policy-name', policy_name)
+ quota_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(quota_policy_info)
+ quota_policy_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(quota_policy_get_iter, True)
+ if result.get_child_by_name('attributes-list'):
+ quota_policy_attributes = result['attributes-list']['quota-policy-info']
+ return_value = {
+ 'name': quota_policy_attributes['policy-name']
+ }
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching quota policy %s: %s' % (policy_name, to_native(error)),
+ exception=traceback.format_exc())
+ return return_value
+
+ def create_quota_policy(self):
+ """
+ Creates a new quota policy
+ """
+ quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-create")
+ quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
+ quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
+ try:
+ self.server.invoke_successfully(quota_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating quota policy %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_quota_policy(self):
+ """
+ Deletes a quota policy
+ """
+ quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-delete")
+ quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
+ try:
+ self.server.invoke_successfully(quota_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting quota policy %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_quota_policy(self):
+ """
+ Rename a quota policy
+ """
+ quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-rename")
+ quota_policy_obj.add_new_child("policy-name", self.parameters['from_name'])
+ quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
+ quota_policy_obj.add_new_child("new-policy-name", self.parameters['name'])
+ try:
+ self.server.invoke_successfully(quota_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming quota policy %s: %s' % (self.parameters['from_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_quota_policy", self.server)
+ current = self.get_quota_policy()
+ # rename and create are mutually exclusive
+ rename, cd_action = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name'):
+ # create policy by renaming it
+ rename = self.na_helper.is_rename_action(self.get_quota_policy(self.parameters['from_name']), current)
+ if rename is None:
+ self.module.fail_json(msg='Error renaming quota policy: %s does not exist.' % self.parameters['from_name'])
+
+ # check if policy should be assigned
+ assign_policy = cd_action == 'create' and self.parameters['auto_assign']
+ if cd_action is None and current and self.parameters['auto_assign']:
+ # find out if the existing policy needs to be changed
+ svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
+ if svm.get('quota_policy') != self.parameters['name']:
+ assign_policy = True
+ self.na_helper.changed = True
+ if cd_action == 'delete':
+ # can't delete if already assigned
+ svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
+ if svm.get('quota_policy') == self.parameters['name']:
+ self.module.fail_json(msg='Error policy %s cannot be deleted as it is assigned to the vserver %s' %
+ (self.parameters['name'], self.parameters['vserver']))
+
+ if self.na_helper.changed and not self.module.check_mode:
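+            # rename is only set when the target name does not exist yet, so it takes the place of the create step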
+ if rename:
+ self.rename_quota_policy()
+ elif cd_action == 'create':
+ self.create_quota_policy()
+ elif cd_action == 'delete':
+ self.delete_quota_policy()
+ if assign_policy:
+ zapis_svm.modify_vserver(self.server, self.module, self.parameters['vserver'], modify=dict(quota_policy=self.parameters['name']))
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP quota policy object and runs the correct play task
+ """
+ obj = NetAppOntapQuotaPolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py
new file mode 100644
index 00000000..330a6986
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_quotas
+short_description: NetApp ONTAP Quotas
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Set/Modify/Delete quota on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified quota should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ required: true
+ description:
+ - Name of the vserver to use.
+ type: str
+ volume:
+ description:
+ - The name of the volume that the quota resides on.
+ required: true
+ type: str
+ quota_target:
+ description:
+ - The quota target of the type specified.
+ required: true
+ type: str
+ qtree:
+ description:
+ - Name of the qtree for the quota.
+ - For user or group rules, it can be the qtree name or "" if no qtree.
+ - For tree type rules, this field must be "".
+ default: ""
+ type: str
+ type:
+ description:
+ - The type of quota rule
+ choices: ['user', 'group', 'tree']
+ required: true
+ type: str
+ policy:
+ description:
+ - Name of the quota policy from which the quota rule should be obtained.
+ type: str
+ set_quota_status:
+ description:
+ - Whether the specified volume should have quota status on or off.
+ type: bool
+ perform_user_mapping:
+ description:
+ - Whether quota management will perform user mapping for the user specified in quota-target.
+ - User mapping can be specified only for a user quota rule.
+ type: bool
+ version_added: 20.12.0
+ file_limit:
+ description:
+ - The number of files that the target can have.
+ type: str
+ disk_limit:
+ description:
+ - The amount of disk space that is reserved for the target.
+ type: str
+ soft_file_limit:
+ description:
+ - The number of files the target would have to exceed before a message is logged and an SNMP trap is generated.
+ type: str
+ soft_disk_limit:
+ description:
+ - The amount of disk space the target would have to exceed before a message is logged and an SNMP trap is generated.
+ type: str
+ threshold:
+ description:
+ - The amount of disk space the target would have to exceed before a message is logged.
+ type: str
+ activate_quota_on_change:
+ description:
+ - Method to use to activate quota on a change.
+ choices: ['resize', 'reinitialize', 'none']
+ default: resize
+ type: str
+ version_added: 20.12.0
+'''
+
+EXAMPLES = """
+ - name: Add/Set quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Resize quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ activate_quota_on_change: resize
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Reinitialize quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ activate_quota_on_change: reinitialize
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: modify quota
+ na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ threshold: 3
+ set_quota_status: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete quota
+ na_ontap_quotas:
+ state: absent
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPQuotas(object):
+ '''Class with quotas methods'''
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ volume=dict(required=True, type='str'),
+ quota_target=dict(required=True, type='str'),
+ qtree=dict(required=False, type='str', default=""),
+ type=dict(required=True, type='str', choices=['user', 'group', 'tree']),
+ policy=dict(required=False, type='str'),
+ set_quota_status=dict(required=False, type='bool'),
+ perform_user_mapping=dict(required=False, type='bool'),
+ file_limit=dict(required=False, type='str'),
+ disk_limit=dict(required=False, type='str'),
+ soft_file_limit=dict(required=False, type='str'),
+ soft_disk_limit=dict(required=False, type='str'),
+ threshold=dict(required=False, type='str'),
+ activate_quota_on_change=dict(required=False, type='str', choices=['resize', 'reinitialize', 'none'], default='resize')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_quota_status(self):
+ """
+ Return details about the quota status
+ :param:
+ name : volume name
+ :return: status of the quota. None if not found.
+ :rtype: dict
+ """
+ quota_status_get = netapp_utils.zapi.NaElement('quota-status')
+ quota_status_get.translate_struct({
+ 'volume': self.parameters['volume']
+ })
+ try:
+ result = self.server.invoke_successfully(quota_status_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching quotas status info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result:
+ return result['status']
+ return None
+
+ def get_quotas(self):
+ """
+ Get quota details
+        :return: dict of quota details if the quota entry exists, None otherwise
+ """
+ quota_get = netapp_utils.zapi.NaElement('quota-list-entries-iter')
+ query = {
+ 'query': {
+ 'quota-entry': {
+ 'volume': self.parameters['volume'],
+ 'quota-target': self.parameters['quota_target'],
+ 'quota-type': self.parameters['type'],
+ 'vserver': self.parameters['vserver']
+ }
+ }
+ }
+ quota_get.translate_struct(query)
+ if self.parameters.get('policy'):
+ quota_get['query']['quota-entry'].add_new_child('policy', self.parameters['policy'])
+ try:
+ result = self.server.invoke_successfully(quota_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching quotas info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return_values = {'volume': result['attributes-list']['quota-entry']['volume'],
+ 'file_limit': result['attributes-list']['quota-entry']['file-limit'],
+ 'disk_limit': result['attributes-list']['quota-entry']['disk-limit'],
+ 'soft_file_limit': result['attributes-list']['quota-entry']['soft-file-limit'],
+ 'soft_disk_limit': result['attributes-list']['quota-entry']['soft-disk-limit'],
+ 'threshold': result['attributes-list']['quota-entry']['threshold']}
+ value = self.na_helper.safe_get(result, ['attributes-list', 'quota-entry', 'perform-user-mapping'])
+ if value is not None:
+ return_values['perform_user_mapping'] = self.na_helper.get_value_for_bool(True, value)
+ return return_values
+ return None
+
+ def quota_entry_set(self):
+ """
+ Adds a quota entry
+ """
+ options = {'volume': self.parameters['volume'],
+ 'quota-target': self.parameters['quota_target'],
+ 'quota-type': self.parameters['type'],
+ 'qtree': self.parameters['qtree']}
+
+ if self.parameters.get('file_limit'):
+ options['file-limit'] = self.parameters['file_limit']
+ if self.parameters.get('disk_limit'):
+ options['disk-limit'] = self.parameters['disk_limit']
+ if self.parameters.get('perform_user_mapping') is not None:
+ options['perform-user-mapping'] = str(self.parameters['perform_user_mapping'])
+ if self.parameters.get('soft_file_limit'):
+ options['soft-file-limit'] = self.parameters['soft_file_limit']
+ if self.parameters.get('soft_disk_limit'):
+ options['soft-disk-limit'] = self.parameters['soft_disk_limit']
+ if self.parameters.get('threshold'):
+ options['threshold'] = self.parameters['threshold']
+ if self.parameters.get('policy'):
+ options['policy'] = self.parameters['policy']
+ set_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'quota-set-entry', **options)
+ try:
+ self.server.invoke_successfully(set_entry, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding/modifying quota entry %s: %s'
+ % (self.parameters['volume'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def quota_entry_delete(self):
+ """
+ Deletes a quota entry
+ """
+ options = {'volume': self.parameters['volume'],
+ 'quota-target': self.parameters['quota_target'],
+ 'quota-type': self.parameters['type'],
+ 'qtree': self.parameters['qtree']}
+ set_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'quota-delete-entry', **options)
+ if self.parameters.get('policy'):
+ set_entry.add_new_child('policy', self.parameters['policy'])
+ try:
+ self.server.invoke_successfully(set_entry, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting quota entry %s: %s'
+ % (self.parameters['volume'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def quota_entry_modify(self, modify_attrs):
+ """
+ Modifies a quota entry
+ """
+ options = {'volume': self.parameters['volume'],
+ 'quota-target': self.parameters['quota_target'],
+ 'quota-type': self.parameters['type'],
+ 'qtree': self.parameters['qtree']}
+ options.update(modify_attrs)
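+        # re-apply the values requested in the playbook on top of the attributes detected as changed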
+ if self.parameters.get('file_limit'):
+ options['file-limit'] = self.parameters['file_limit']
+ if self.parameters.get('disk_limit'):
+ options['disk-limit'] = self.parameters['disk_limit']
+ if self.parameters.get('perform_user_mapping') is not None:
+ options['perform-user-mapping'] = str(self.parameters['perform_user_mapping'])
+ if self.parameters.get('soft_file_limit'):
+ options['soft-file-limit'] = self.parameters['soft_file_limit']
+ if self.parameters.get('soft_disk_limit'):
+ options['soft-disk-limit'] = self.parameters['soft_disk_limit']
+ if self.parameters.get('threshold'):
+ options['threshold'] = self.parameters['threshold']
+ if self.parameters.get('policy'):
+ options['policy'] = str(self.parameters['policy'])
+ modify_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'quota-modify-entry', **options)
+ try:
+ self.server.invoke_successfully(modify_entry, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying quota entry %s: %s'
+ % (self.parameters['volume'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def on_or_off_quota(self, status):
+ """
+        Turn quotas on or off for the volume
+ """
+ quota = netapp_utils.zapi.NaElement.create_node_with_children(
+ status, **{'volume': self.parameters['volume']})
+ try:
+ self.server.invoke_successfully(quota,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting %s for %s: %s'
+ % (status, self.parameters['volume'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def resize_quota(self):
+ """
+ resize quota
+ """
+ quota = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'quota-resize', **{'volume': self.parameters['volume']})
+ try:
+ self.server.invoke_successfully(quota,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting %s for %s: %s'
+ % ('quota-resize', self.parameters['volume'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to quotas
+ """
+ netapp_utils.ems_log_event("na_ontap_quotas", self.server)
+ modify_quota_status = None
+ modify_quota = None
+ quota_status = None
+ current = self.get_quotas()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify_quota = self.na_helper.get_modified_attributes(current, self.parameters)
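+        # the volume quota status is only needed when the user asked to change it or when a rule change may require (re)activation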
+ if 'set_quota_status' in self.parameters or modify_quota:
+ quota_status = self.get_quota_status()
+ if 'set_quota_status' in self.parameters and quota_status is not None:
+            quota_status_action = self.na_helper.get_modified_attributes(
+                {'set_quota_status': quota_status == 'on'}, self.parameters)
+ if quota_status_action:
+ modify_quota_status = 'quota-on' if quota_status_action['set_quota_status'] else 'quota-off'
+ if modify_quota is not None and modify_quota_status is None and quota_status == 'on':
+ # do we need to resize or reinitialize:
+ if self.parameters['activate_quota_on_change'] in ['resize', 'reinitialize']:
+ modify_quota_status = self.parameters['activate_quota_on_change']
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.quota_entry_set()
+ elif cd_action == 'delete':
+ self.quota_entry_delete()
+ elif modify_quota is not None:
+ for key in list(modify_quota):
+ modify_quota[key.replace("_", "-")] = modify_quota.pop(key)
+ self.quota_entry_modify(modify_quota)
+ if modify_quota_status in ['quota-off', 'quota-on']:
+ self.on_or_off_quota(modify_quota_status)
+ elif modify_quota_status == 'resize':
+ self.resize_quota()
+ elif modify_quota_status == 'reinitialize':
+ self.on_or_off_quota('quota-off')
+ time.sleep(10) # status switch interval
+ self.on_or_off_quota('quota-on')
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Execute action'''
+ quota_obj = NetAppONTAPQuotas()
+ quota_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py
new file mode 100644
index 00000000..3ddc8de7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_rest_cli
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Run system-cli commands on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_rest_cli
+short_description: NetApp ONTAP Run any CLI command; the username provided needs to have console login permission.
+version_added: 2.9.0
+options:
+ command:
+ description:
+      - A string command.
+ required: true
+ type: str
+ verb:
+ description:
+      - A string indicating which API call to run.
+      - OPTIONS is useful to know which verbs are supported by the REST API.
+ choices: ['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']
+ required: true
+ type: str
+ params:
+ description:
+      - A dictionary of parameters to pass into the API call.
+ type: dict
+ body:
+ description:
+      - A dictionary for the info specification.
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: run ontap rest cli command
+ na_ontap_rest_cli:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: 'version'
+ verb: 'GET'
+
+ - name: run ontap rest cli command
+ na_ontap_rest_cli:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: 'security/login/motd'
+ verb: 'PATCH'
+ params: {'vserver': 'ansibleSVM'}
+ body: {'message': 'test'}
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPCommandREST(object):
+ ''' calls a CLI command '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ command=dict(required=True, type='str'),
+ verb=dict(required=True, type='str', choices=['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']),
+ params=dict(required=False, type='dict', default={}),
+ body=dict(required=False, type='dict', default={})
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.rest_api = OntapRestAPI(self.module)
+ parameters = self.module.params
+ # set up state variables
+ self.command = parameters['command']
+ self.verb = parameters['verb']
+ self.params = parameters['params']
+ self.body = parameters['body']
+
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ self.module.fail_json(msg="use na_ontap_command for non-rest cli")
+
+ def run_command(self):
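+        # CLI commands are exposed through the private/cli/ REST namespace; the verb selects the HTTP method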
+ api = "private/cli/" + self.command
+
+ if self.verb == 'POST':
+ message, error = self.rest_api.post(api, self.body, self.params)
+ elif self.verb == 'GET':
+ message, error = self.rest_api.get(api, self.params)
+ elif self.verb == 'PATCH':
+ message, error = self.rest_api.patch(api, self.body, self.params)
+ elif self.verb == 'DELETE':
+ message, error = self.rest_api.delete(api, self.body, self.params)
+ elif self.verb == 'OPTIONS':
+ message, error = self.rest_api.options(api, self.params)
+ else:
+ self.module.fail_json(msg='Error running command %s:' % self.command,
+ exception=traceback.format_exc())
+
+ if error:
+ self.module.fail_json(msg=error)
+ return message
+
+ def apply(self):
+ ''' calls the command and returns raw output '''
+ changed = True
+ output = self.run_command()
+ self.module.exit_json(changed=changed, msg=output)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPCommandREST()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
new file mode 100644
index 00000000..42d31c36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
@@ -0,0 +1,617 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" NetApp ONTAP Info using REST APIs """
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_rest_info
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: NetApp ONTAP information gatherer using REST APIs
+description:
+ - This module allows you to gather various information about ONTAP configuration using REST APIs
+version_added: 20.5.0
+
+options:
+ state:
+ type: str
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected
+        to a given subset. Either the info name or the REST API can be given.
+ Possible values for this argument include
+ "aggregate_info" or "storage/aggregates",
+ "application_info" or "application/applications",
+ "application_template_info" or "application/templates",
+ "autosupport_config_info" or "support/autosupport",
+ "autosupport_messages_history" or "support/autosupport/messages",
+ "broadcast_domains_info" or "network/ethernet/broadcast-domains",
+ "cifs_home_directory_info" or "protocols/cifs/home-directory/search-paths",
+ "cifs_services_info" or "protocols/cifs/services",
+ "cifs_share_info" or "protocols/cifs/shares",
+ "cloud_targets_info" or "cloud/targets",
+ "cluster_chassis_info" or "cluster/chassis",
+ "cluster_jobs_info" or "cluster/jobs",
+ "cluster_metrics_info" or "cluster/metrics",
+ "cluster_node_info" or "cluster/nodes",
+ "cluster_peer_info" or "cluster/peers",
+ "cluster_schedules" or "cluster/schedules",
+ "cluster_software_download" or "cluster/software/download",
+ "cluster_software_history" or "cluster/software/history",
+ "cluster_software_packages" or "cluster/software/packages",
+ "disk_info" or "storage/disks",
+ "event_notification_info" or "support/ems/destinations",
+ "event_notification_destination_info" or "support/ems/destinations",
+ "initiator_groups_info" or "protocols/san/igroups",
+ "ip_interfaces_info" or "network/ip/interfaces",
+ "ip_routes_info" or "network/ip/routes",
+ "ip_service_policies" or "network/ip/service-policies",
+ "network_ipspaces_info" or "network/ipspaces",
+ "network_ports_info" or "network/ethernet/ports",
+ "ontap_system_version" or "cluster/software",
+ "san_fc_logins_info" or "network/fc/logins",
+ "san_fc_wppn-aliases" or "network/fc/wwpn-aliases",
+ "san_fcp_services" or "protocols/san/fcp/services",
+ "san_iscsi_credentials" or "protocols/san/iscsi/credentials",
+ "san_iscsi_services" or "protocols/san/iscsi/services",
+ "san_lun_maps" or "protocols/san/lun-maps",
+ "security_login_info" or "security/accounts",
+ "security_login_rest_role_info" or "security/roles",
+ "storage_flexcaches_info" or "storage/flexcache/flexcaches",
+ "storage_flexcaches_origin_info" or "storage/flexcache/origins",
+ "storage_luns_info" or "storage/luns",
+ "storage_NVMe_namespaces" or "storage/namespaces",
+ "storage_ports_info" or "storage/ports",
+ "storage_qos_policies" or "storage/qos/policies",
+ "storage_qtrees_config" or "storage/qtrees",
+ "storage_quota_reports" or "storage/quota/reports",
+ "storage_quota_policy_rules" or "storage/quota/rules",
+ "storage_shelves_config" or "storage/shelves",
+ "storage_snapshot_policies" or "storage/snapshot-policies",
+ "support_ems_config" or "support/ems",
+ "support_ems_events" or "support/ems/events",
+ "support_ems_filters" or "support/ems/filters",
+ "svm_dns_config_info" or "name-services/dns",
+ "svm_ldap_config_info" or "name-services/ldap",
+ "svm_name_mapping_config_info" or "name-services/name-mappings",
+ "svm_nis_config_info" or "name-services/nis",
+ "svm_peers_info" or "svm/peers",
+ "svm_peer-permissions_info" or "svm/peer-permissions",
+ "vserver_info" or "svm/svms",
+ "volume_info" or "storage/volumes",
+ Can specify a list of values to include a larger subset.
+      - REST APIs are supported from ONTAP 9.6 onwards.
+ default: "all"
+ max_records:
+ type: int
+ description:
+ - Maximum number of records returned in a single call.
+ default: 1024
+ fields:
+ type: list
+ elements: str
+ description:
+ - Request specific fields from subset.
+ '*' to return all the fields, one or more subsets are allowed.
+ '<list of fields>' to return specified fields, only one subset will be allowed.
+ - If the option is not present, return all the fields.
+ version_added: '20.6.0'
+ parameters:
+ description:
+      - Allows for any REST option to be passed in.
+ type: dict
+ version_added: '20.7.0'
+'''
+
+EXAMPLES = '''
+- name: run ONTAP gather facts for vserver info
+  na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - vserver_info
+- name: run ONTAP gather facts for aggregate info and volume info
+  na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - aggregate_info
+ - volume_info
+- name: run ONTAP gather facts for all subsets
+  na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - all
+- name: run ONTAP gather facts for aggregate info and volume info with fields section
+  na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - '*'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - aggregate_info
+ - volume_info
+- name: run ONTAP gather facts for aggregate info with specified fields
+  na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - 'uuid'
+ - 'name'
+ - 'node'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - aggregate_info
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPGatherInfo(object):
+ '''Class with gather info methods'''
+
+ def __init__(self):
+ """
+        Parse arguments, set up state variables,
+        check parameters and ensure the request module is installed
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(type='str', choices=['info'], default='info', required=False),
+ gather_subset=dict(default=['all'], type='list', elements='str', required=False),
+ max_records=dict(type='int', default=1024, required=False),
+ fields=dict(type='list', elements='str', required=False),
+ parameters=dict(type='dict', required=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.fields = list()
+
+ self.rest_api = OntapRestAPI(self.module)
+
+ def validate_ontap_version(self):
+ """
+ Method to validate the ONTAP version
+ """
+
+ api = 'cluster'
+ data = {'fields': ['version']}
+
+ ontap_version, error = self.rest_api.get(api, data)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ return ontap_version
+
+ def get_subset_info(self, gather_subset_info):
+ """
+ Gather ONTAP information for the given subset using REST APIs
+        Input for the REST API call: (api, data)
+ return gathered_ontap_info
+ """
+
+ api = gather_subset_info['api_call']
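+        # subsets flagged with 'post' (cluster/metrocluster/diagnostics) must first be generated with a POST call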
+ if gather_subset_info.pop('post', False):
+ self.run_post(gather_subset_info)
+ data = {'max_records': self.parameters['max_records'], 'fields': self.fields}
+ # allow for passing in any additional rest api fields
+ if self.parameters.get('parameters'):
+ for each in self.parameters['parameters']:
+ data[each] = self.parameters['parameters'][each]
+
+ gathered_ontap_info, error = self.rest_api.get(api, data)
+
+ if error:
+ # Fail the module if error occurs from REST APIs call
+ if int(error.get('code', 0)) == 6:
+ self.module.fail_json(msg="%s user is not authorized to make %s api call" % (self.parameters.get('username'), api))
+ # if Aggr recommender can't make a recommendation it will fail with the following error code.
+ # We don't want to fail
+ elif int(error.get('code', 0)) == 19726344 and "No recommendation can be made for this cluster" in error.get('message'):
+ return error.get('message')
+ # If the API doesn't exist (using an older system) we don't want to fail
+ elif int(error.get('code', 0)) == 3:
+ return error.get('message')
+ else:
+ self.module.fail_json(msg=error)
+ else:
+ return gathered_ontap_info
+
+ return None
+
+ def run_post(self, gather_subset_info):
+ api = gather_subset_info['api_call']
+ post_return, error = self.rest_api.post(api, None)
+ if error:
+ return None
+ message, error = self.rest_api.wait_on_job(post_return['job'], increment=5)
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def get_next_records(self, api):
+ """
+        Gather next set of ONTAP information for the specified API
+        Input for the REST API call: (api, data)
+ return gather_subset_info
+ """
+
+ data = {}
+ gather_subset_info, error = self.rest_api.get(api, data)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ return gather_subset_info
+
+ def convert_subsets(self):
+ """
+ Convert an info to the REST API
+ """
+ info_to_rest_mapping = {
+ "aggregate_info": "storage/aggregates",
+ "application_info": "application/applications",
+ "application_template_info": "application/templates",
+ "autosupport_config_info": "support/autosupport",
+ "autosupport_messages_history": "support/autosupport/messages",
+ "broadcast_domains_info": "network/ethernet/broadcast-domains",
+ "cifs_home_directory_info": "protocols/cifs/home-directory/search-paths",
+ "cifs_services_info": "protocols/cifs/services",
+ "cifs_share_info": "protocols/cifs/shares",
+ "cloud_targets_info": "cloud/targets",
+ "cluster_chassis_info": "cluster/chassis",
+ "cluster_jobs_info": "cluster/jobs",
+ "cluster_metrocluster_diagnostics": "cluster/metrocluster/diagnostics",
+ "cluster_metrics_info": "cluster/metrics",
+ "cluster_node_info": "cluster/nodes",
+ "cluster_peer_info": "cluster/peers",
+ "cluster_schedules": "cluster/schedules",
+ "cluster_software_download": "cluster/software/download",
+ "cluster_software_history": "cluster/software/history",
+ "cluster_software_packages": "cluster/software/packages",
+ "disk_info": "storage/disks",
+ "event_notification_info": "support/ems/destinations",
+ "event_notification_destination_info": "support/ems/destinations",
+ "initiator_groups_info": "protocols/san/igroups",
+ "ip_interfaces_info": "network/ip/interfaces",
+ "ip_routes_info": "network/ip/routes",
+ "ip_service_policies": "network/ip/service-policies",
+ "network_ipspaces_info": "network/ipspaces",
+ "network_ports_info": "network/ethernet/ports",
+ "ontap_system_version": "cluster/software",
+ "san_fc_logins_info": "network/fc/logins",
+ "san_fc_wppn-aliases": "network/fc/wwpn-aliases",
+ "san_fcp_services": "protocols/san/fcp/services",
+ "san_iscsi_credentials": "protocols/san/iscsi/credentials",
+ "san_iscsi_services": "protocols/san/iscsi/services",
+ "san_lun_maps": "protocols/san/lun-maps",
+ "security_login_info": "security/accounts",
+ "security_login_rest_role_info": "security/roles",
+ "storage_flexcaches_info": "storage/flexcache/flexcaches",
+ "storage_flexcaches_origin_info": "storage/flexcache/origins",
+ "storage_luns_info": "storage/luns",
+ "storage_NVMe_namespaces": "storage/namespaces",
+ "storage_ports_info": "storage/ports",
+ "storage_qos_policies": "storage/qos/policies",
+ "storage_qtrees_config": "storage/qtrees",
+ "storage_quota_reports": "storage/quota/reports",
+ "storage_quota_policy_rules": "storage/quota/rules",
+ "storage_shelves_config": "storage/shelves",
+ "storage_snapshot_policies": "storage/snapshot-policies",
+ "support_ems_config": "support/ems",
+ "support_ems_events": "support/ems/events",
+ "support_ems_filters": "support/ems/filters",
+ "svm_dns_config_info": "name-services/dns",
+ "svm_ldap_config_info": "name-services/ldap",
+ "svm_name_mapping_config_info": "name-services/name-mappings",
+ "svm_nis_config_info": "name-services/nis",
+ "svm_peers_info": "svm/peers",
+ "svm_peer-permissions_info": "svm/peer-permissions",
+ "vserver_info": "svm/svms",
+ "volume_info": "storage/volumes"
+ }
+        # Map info names to their REST API equivalents, making sure we do not add duplicates
+ subsets = []
+ for subset in self.parameters['gather_subset']:
+ if subset in info_to_rest_mapping:
+ if info_to_rest_mapping[subset] not in subsets:
+ subsets.append(info_to_rest_mapping[subset])
+ else:
+ if subset not in subsets:
+ subsets.append(subset)
+ return subsets
+
+ def apply(self):
+ """
+ Perform pre-checks, call functions and exit
+ """
+
+ result_message = dict()
+
+ # Validating ONTAP version
+ self.validate_ontap_version()
+
+ # Defining gather_subset and appropriate api_call
+ get_ontap_subset_info = {
+ 'application/applications': {
+ 'api_call': 'application/applications',
+ },
+ 'application/templates': {
+ 'api_call': 'application/templates',
+ },
+ 'cloud/targets': {
+ 'api_call': 'cloud/targets',
+ },
+ 'cluster/chassis': {
+ 'api_call': 'cluster/chassis',
+ },
+ 'cluster/jobs': {
+ 'api_call': 'cluster/jobs',
+ },
+ 'cluster/metrocluster/diagnostics': {
+ 'api_call': 'cluster/metrocluster/diagnostics',
+ 'post': True
+ },
+ 'cluster/metrics': {
+ 'api_call': 'cluster/metrics',
+ },
+ 'cluster/nodes': {
+ 'api_call': 'cluster/nodes',
+ },
+ 'cluster/peers': {
+ 'api_call': 'cluster/peers',
+ },
+ 'cluster/schedules': {
+ 'api_call': 'cluster/schedules',
+ },
+ 'cluster/software': {
+ 'api_call': 'cluster/software',
+ },
+ 'cluster/software/download': {
+ 'api_call': 'cluster/software/download',
+ },
+ 'cluster/software/history': {
+ 'api_call': 'cluster/software/history',
+ },
+ 'cluster/software/packages': {
+ 'api_call': 'cluster/software/packages',
+ },
+ 'name-services/dns': {
+ 'api_call': 'name-services/dns',
+ },
+ 'name-services/ldap': {
+ 'api_call': 'name-services/ldap',
+ },
+ 'name-services/name-mappings': {
+ 'api_call': 'name-services/name-mappings',
+ },
+ 'name-services/nis': {
+ 'api_call': 'name-services/nis',
+ },
+ 'network/ethernet/broadcast-domains': {
+ 'api_call': 'network/ethernet/broadcast-domains',
+ },
+ 'network/ethernet/ports': {
+ 'api_call': 'network/ethernet/ports',
+ },
+ 'network/fc/logins': {
+ 'api_call': 'network/fc/logins',
+ },
+ 'network/fc/wwpn-aliases': {
+ 'api_call': 'network/fc/wwpn-aliases',
+ },
+ 'network/ip/interfaces': {
+ 'api_call': 'network/ip/interfaces',
+ },
+ 'network/ip/routes': {
+ 'api_call': 'network/ip/routes',
+ },
+ 'network/ip/service-policies': {
+ 'api_call': 'network/ip/service-policies',
+ },
+ 'network/ipspaces': {
+ 'api_call': 'network/ipspaces',
+ },
+ 'protocols/cifs/home-directory/search-paths': {
+ 'api_call': 'protocols/cifs/home-directory/search-paths',
+ },
+ 'protocols/cifs/services': {
+ 'api_call': 'protocols/cifs/services',
+ },
+ 'protocols/cifs/shares': {
+ 'api_call': 'protocols/cifs/shares',
+ },
+ 'protocols/san/fcp/services': {
+ 'api_call': 'protocols/san/fcp/services',
+ },
+ 'protocols/san/igroups': {
+ 'api_call': 'protocols/san/igroups',
+ },
+ 'protocols/san/iscsi/credentials': {
+ 'api_call': 'protocols/san/iscsi/credentials',
+ },
+ 'protocols/san/iscsi/services': {
+ 'api_call': 'protocols/san/iscsi/services',
+ },
+ 'protocols/san/lun-maps': {
+ 'api_call': 'protocols/san/lun-maps',
+ },
+ 'security/accounts': {
+ 'api_call': 'security/accounts',
+ },
+ 'security/roles': {
+ 'api_call': 'security/roles',
+ },
+ 'storage/aggregates': {
+ 'api_call': 'storage/aggregates',
+ },
+ 'storage/disks': {
+ 'api_call': 'storage/disks',
+ },
+ 'storage/flexcache/flexcaches': {
+ 'api_call': 'storage/flexcache/flexcaches',
+ },
+ 'storage/flexcache/origins': {
+ 'api_call': 'storage/flexcache/origins',
+ },
+ 'storage/luns': {
+ 'api_call': 'storage/luns',
+ },
+ 'storage/namespaces': {
+ 'api_call': 'storage/namespaces',
+ },
+ 'storage/ports': {
+ 'api_call': 'storage/ports',
+ },
+ 'storage/qos/policies': {
+ 'api_call': 'storage/qos/policies',
+ },
+ 'storage/qtrees': {
+ 'api_call': 'storage/qtrees',
+ },
+ 'storage/quota/reports': {
+ 'api_call': 'storage/quota/reports',
+ },
+ 'storage/quota/rules': {
+ 'api_call': 'storage/quota/rules',
+ },
+ 'storage/shelves': {
+ 'api_call': 'storage/shelves',
+ },
+ 'storage/snapshot-policies': {
+ 'api_call': 'storage/snapshot-policies',
+ },
+ 'storage/volumes': {
+ 'api_call': 'storage/volumes',
+ },
+ 'support/autosupport': {
+ 'api_call': 'support/autosupport',
+ },
+ 'support/autosupport/messages': {
+ 'api_call': 'support/autosupport/messages',
+ },
+ 'support/ems': {
+ 'api_call': 'support/ems',
+ },
+ 'support/ems/destinations': {
+ 'api_call': 'support/ems/destinations',
+ },
+ 'support/ems/events': {
+ 'api_call': 'support/ems/events',
+ },
+ 'support/ems/filters': {
+ 'api_call': 'support/ems/filters',
+ },
+ 'svm/peers': {
+ 'api_call': 'svm/peers',
+ },
+ 'svm/peer-permissions': {
+ 'api_call': 'svm/peer-permissions',
+ },
+ 'svm/svms': {
+ 'api_call': 'svm/svms',
+ }
+ }
+
+ if 'all' in self.parameters['gather_subset']:
+ # If all in subset list, get the information of all subsets
+ self.parameters['gather_subset'] = sorted(get_ontap_subset_info.keys())
+
+ length_of_subsets = len(self.parameters['gather_subset'])
+
+ if self.parameters.get('fields') is not None:
+ # If multiple fields specified to return, convert list to string
+ self.fields = ','.join(self.parameters.get('fields'))
+
+ if self.fields != '*' and length_of_subsets > 1:
+ # Restrict gather subsets to one subset if fields section is list_of_fields
+ self.module.fail_json(msg="Error: fields: %s, only one subset will be allowed." % self.parameters.get('fields'))
+ converted_subsets = self.convert_subsets()
+
+ for subset in converted_subsets:
+ try:
+ # Verify whether the supported subset passed
+ specified_subset = get_ontap_subset_info[subset]
+ except KeyError:
+ self.module.fail_json(msg="Specified subset %s is not found, supported subsets are %s" %
+ (subset, list(get_ontap_subset_info.keys())))
+
+ result_message[subset] = self.get_subset_info(specified_subset)
+
+ if result_message[subset] is not None:
+ if isinstance(result_message[subset], dict):
+ while result_message[subset]['_links'].get('next'):
+ # Get all the set of records if next link found in subset_info for the specified subset
+ next_api = result_message[subset]['_links']['next']['href']
+ gathered_subset_info = self.get_next_records(next_api.replace('/api', ''))
+
+ # Update the subset info for the specified subset
+ result_message[subset]['_links'] = gathered_subset_info['_links']
+ result_message[subset]['records'].extend(gathered_subset_info['records'])
+
+ # metrocluster doesn't have a records field, so we need to skip this
+ if result_message[subset].get('records') is not None:
+ # Getting total number of records
+ result_message[subset]['num_records'] = len(result_message[subset]['records'])
+
+        self.module.exit_json(changed=False, state=self.parameters['state'], ontap_info=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ obj = NetAppONTAPGatherInfo()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py
new file mode 100644
index 00000000..9cc4b3ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Call a REST API on ONTAP.
+  - Cluster REST APIs are run using a cluster admin account.
+  - Vserver REST APIs can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver_) options).
+ - In case of success, a json dictionary is returned as C(response).
+ - In case of a REST API error, C(status_code), C(error_code), C(error_message) are set to help with diagnosing the issue,
+ - and the call is reported as an error ('failed').
+  - Other errors (e.g. connection issues) are reported as an Ansible error.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_restit
+short_description: NetApp ONTAP Run any REST API on ONTAP
+version_added: "20.4.0"
+options:
+ api:
+ description:
+      - The REST API to call (e.g. I(cluster/software), I(svm/svms)).
+ required: true
+ type: str
+ method:
+ description:
+ - The REST method to use.
+ default: GET
+ type: str
+ query:
+ description:
+      - A dictionary of query parameters.
+ type: dict
+ body:
+ description:
+ - A dictionary for the info parameter
+ type: dict
+ aliases: ['info']
+ vserver_name:
+ description:
+      - If provided, forces vserver tunneling. The username identifies a cluster admin account.
+ type: str
+ vserver_uuid:
+ description:
+      - If provided, forces vserver tunneling. The username identifies a cluster admin account.
+ type: str
+ hal_linking:
+ description:
+      - If true, HAL-encoded links are returned in the response.
+ default: false
+ type: bool
+'''
+
+EXAMPLES = """
+-
+ name: Ontap REST API
+ hosts: localhost
+ gather_facts: False
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ admin_ip }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ https: true
+ validate_certs: false
+ svm_login: &svm_login
+ hostname: "{{ svm_admin_ip }}"
+ username: "{{ svm_admin_username }}"
+ password: "{{ svm_admin_password }}"
+ https: true
+ validate_certs: false
+
+ tasks:
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: cluster/software
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: cluster/software
+ query:
+ fields: version
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: svm/svms
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: svm/svms
+ query:
+ fields: aggregates,cifs,nfs,uuid
+ query_fields: name
+ query: trident_svm
+ hal_linking: true
+ register: result
+ - debug: var=result
+
+ - name: run ontap REST API command as vsadmin
+ na_ontap_restit:
+ <<: *svm_login
+ api: svm/svms
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: storage/volumes
+ vserver_name: ansibleSVM
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+ - set_fact:
+ uuid: "{{ result.response.records | json_query(get_uuid) }}"
+ vars:
+ get_uuid: "[? name=='deleteme_ln1'].uuid"
+ - debug: var=uuid
+
+ - name: run ontap REST API command as DELETE method with vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: "storage/volumes/{{ uuid[0] }}"
+ method: DELETE
+ vserver_name: ansibleSVM
+ query:
+ return_timeout: 60
+ register: result
+ when: uuid|length == 1
+ - debug: var=result
+ - assert: { that: result.skipped|default(false) or result.status_code|default(404) == 200, quiet: True }
+
+ - name: run ontap REST API command as POST method with vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: storage/volumes
+ method: POST
+ vserver_name: ansibleSVM
+ query:
+ return_records: "true"
+ return_timeout: 60
+ body:
+ name: deleteme_ln1
+ aggregates:
+ - name: aggr1
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==201, quiet: True }
+
+ - name: run ontap REST API command as DELETE method with vserver tunneling
+ # delete test volume if present
+ na_ontap_restit:
+ <<: *login
+ api: "storage/volumes/{{ result.response.records[0].uuid }}"
+ method: DELETE
+ vserver_name: ansibleSVM
+ query:
+ return_timeout: 60
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+# error cases
+ - name: run ontap REST API command
+ na_ontap_restit:
+ <<: *login
+ api: unknown/endpoint
+ register: result
+ ignore_errors: True
+ - debug: var=result
+ - assert: { that: result.status_code==404, quiet: True }
+
+"""
+
+RETURN = """
+response:
+ description:
+ - If successful, a json dictionary returned by the REST API.
+ - If the REST API was executed but failed, an empty dictionary.
+ - Not present if the REST API call cannot be performed.
+ returned: On success
+ type: dict
+status_code:
+ description:
+ - The http status code.
+ returned: Always
+ type: str
+error_code:
+ description:
+ - If the REST API was executed but failed, the error code set by the REST API.
+ - Not present if successful, or if the REST API call cannot be performed.
+ returned: On error
+ type: str
+error_message:
+ description:
+ - If the REST API was executed but failed, the error message set by the REST API.
+ - Not present if successful, or if the REST API call cannot be performed.
+ returned: On error
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPRestAPI(object):
+ ''' calls a REST API command '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ api=dict(required=True, type='str'),
+ method=dict(required=False, type='str', default='GET'),
+ query=dict(required=False, type='dict'),
+ body=dict(required=False, type='dict', aliases=['info']),
+ vserver_name=dict(required=False, type='str'),
+ vserver_uuid=dict(required=False, type='str'),
+ hal_linking=dict(required=False, type='bool', default=False),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.api = parameters['api']
+ self.method = parameters['method']
+ self.query = parameters['query']
+ self.body = parameters['body']
+ self.vserver_name = parameters['vserver_name']
+ self.vserver_uuid = parameters['vserver_uuid']
+ self.hal_linking = parameters['hal_linking']
+
+ self.rest_api = OntapRestAPI(self.module)
+
+ def run_api(self):
+ ''' calls the REST API '''
+ # TODO, log usage
+
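+ # Select the Accept content type; with HAL linking, ONTAP returns
+ # hypermedia links alongside the data in the response.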
+ if self.hal_linking:
+ content_type = 'application/hal+json'
+ else:
+ content_type = 'application/json'
+ status, response, error = self.rest_api.send_request(self.method, self.api, self.query, self.body,
+ accept=content_type,
+ vserver_name=self.vserver_name, vserver_uuid=self.vserver_uuid)
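+ # send_request returns a (status_code, response, error) triple; 'error' is
+ # either a dict built from the REST error body (typically with 'message'
+ # and 'code' keys, unpacked below) or a plain string for other failures.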
+ if error:
+ if isinstance(error, dict):
+ error_message = error.pop('message', None)
+ error_code = error.pop('code', None)
+ if not error:
+ # we exhausted the dictionary
+ error = 'check error_message and error_code for details.'
+ else:
+ error_message = error
+ error_code = None
+
+ msg = "Error when calling '%s': %s" % (self.api, str(error))
+ self.module.fail_json(msg=msg, status_code=status, response=response, error_message=error_message, error_code=error_code)
+
+ return status, response
+
+ def apply(self):
+ ''' calls the api and returns json output '''
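+ # Note: changed is always reported as True, as the module cannot tell
+ # whether an arbitrary REST call actually modified anything.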
+ status_code, response = self.run_api()
+ self.module.exit_json(changed=True, status_code=status_code, response=response)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ restapi = NetAppONTAPRestAPI()
+ restapi.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py
new file mode 100644
index 00000000..2561029e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_security_certificates
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_security_certificates
+short_description: NetApp ONTAP manage security certificates.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.7.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Install or delete security certificates on ONTAP. (Create and sign will come in a second iteration)
+
+options:
+
+ state:
+ description:
+ - Whether the specified security certificate should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ common_name:
+ description:
+ - Common name of the certificate.
+ - Required for create and install.
+ - If name is present, ignored for sign and delete.
+ - If name is absent or ignored, required for sign and delete.
+ type: str
+
+ name:
+ description:
+ - The unique name of the security certificate per SVM.
+ - This parameter is not supported for ONTAP 9.6 or 9.7, as the REST API does not support it.
+ - If present with ONTAP 9.6 or 9.7, it is ignored by default, see I(ignore_name_if_not_supported).
+ - It is strongly recommended to use name for newer releases of ONTAP.
+ type: str
+
+ svm:
+ description:
+ - The name of the SVM (vserver).
+ - If present, the certificate is installed in the SVM.
+ - If absent, the certificate is installed in the cluster.
+ type: str
+ aliases:
+ - vserver
+
+ type:
+ description:
+ - Type of certificate.
+ - Required for create and install.
+ - If name is present, ignored for sign and delete.
+ - If name is absent or ignored, required for sign and delete.
+ choices: ['client', 'server', 'client_ca', 'server_ca', 'root_ca']
+ type: str
+
+ public_certificate:
+ description:
+ - Public key certificate in PEM format.
+ - Required when installing a certificate. Ignored otherwise.
+ type: str
+
+ private_key:
+ description:
+ - Private key certificate in PEM format.
+ - Required when installing a CA-signed certificate. Ignored otherwise.
+ type: str
+
+ signing_request:
+ description:
+ - If present, the certificate identified by name and svm is used to sign the request.
+ - A signed certificate is returned.
+ type: str
+
+ expiry_time:
+ description:
+ - Certificate expiration time. Specifying an expiration time is recommended when creating a certificate.
+ - Can be provided when signing a certificate.
+ type: str
+
+ key_size:
+ description:
+ - Key size of the certificate in bits. Specifying a strong key size is recommended when creating a certificate.
+ - Ignored for sign and delete.
+ type: int
+
+ hash_function:
+ description:
+ - Hashing function. Can be provided when creating a self-signed certificate or when signing a certificate.
+ - Allowed values for create and sign are sha256, sha224, sha384, sha512.
+ type: str
+
+ intermediate_certificates:
+ description:
+ - Chain of intermediate Certificates in PEM format.
+ - Only valid when installing a certificate.
+ type: list
+ elements: str
+
+ ignore_name_if_not_supported:
+ description:
+ - ONTAP 9.6 and 9.7 REST API does not support I(name).
+ - If set to true, no error is reported if I(name) is present, and I(name) is not used.
+ type: bool
+ default: true
+ version_added: '20.8.0'
+
+'''
+
+EXAMPLES = """
+- name: install certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ vserver }}"
+
+- name: create certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ expiry_time: P365DT # one year
+
+- name: sign certificate using newly created certificate
+ tags: sign_request
+ na_ontap_security_certificates:
+ # <<: *login
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+ signing_request: |
+ -----BEGIN CERTIFICATE REQUEST-----
+ MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
+ DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD
+ ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE
+ ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2
+ tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q
+ EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm
+ BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE
+ jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB
+ CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU
+ Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln
+ /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J
+ UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2
+ JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu
+ fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac
+ -----END CERTIFICATE REQUEST-----
+ expiry_time: P180DT
+
+- name: delete certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ state: absent
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+
+# For ONTAP 9.6 or 9.7, use common_name and type, in addition to, or in lieu of, name
+- name: install certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ vserver }}"
+
+- name: create certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ expiry_time: P365DT # one year
+
+- name: sign certificate using newly created certificate
+ tags: sign_request
+ na_ontap_security_certificates:
+ # <<: *login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ signing_request: |
+ -----BEGIN CERTIFICATE REQUEST-----
+ MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
+ DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD
+ ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE
+ ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2
+ tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q
+ EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm
+ BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE
+ jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB
+ CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU
+ Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln
+ /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J
+ UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2
+ JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu
+ fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac
+ -----END CERTIFICATE REQUEST-----
+ expiry_time: P180DT
+
+- name: delete certificate
+ na_ontap_security_certificates:
+ # <<: *cert_login
+ state: absent
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+"""
+
+RETURN = """
+ontap_info:
+ description: Returns public_certificate when signing, empty for create, install, and delete.
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_info": {
+ "public_certificate": "-----BEGIN CERTIFICATE-----\n........-----END CERTIFICATE-----\n"
+ }
+ }'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppOntapSecurityCertificates(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ common_name=dict(required=False, type='str'),
+ name=dict(required=False, type='str'),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=False, choices=['client', 'server', 'client_ca', 'server_ca', 'root_ca']),
+ svm=dict(required=False, type='str', aliases=['vserver']),
+ public_certificate=dict(required=False, type='str'),
+ private_key=dict(required=False, type='str'),
+ signing_request=dict(required=False, type='str'),
+ expiry_time=dict(required=False, type='str'),
+ key_size=dict(required=False, type='int'),
+ hash_function=dict(required=False, type='str'),
+ intermediate_certificates=dict(required=False, type='list', elements='str'),
+ ignore_name_if_not_supported=dict(required=False, type='bool', default=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if self.parameters.get('name') is None:
+ if self.parameters.get('common_name') is None or self.parameters.get('type') is None:
+ error = "'name' or ('common_name' and 'type') are required parameters."
+ self.module.fail_json(msg=error)
+
+ # ONTAP 9.6 and 9.7 do not support name. We'll change this to True if we detect an issue.
+ self.ignore_name_param = False
+
+ # API should be used for ONTAP 9.6 or higher
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_security_certificates'))
+
+ def get_certificate(self):
+ """
+ Fetch uuid if certificate exists.
+ NOTE: because of a bug in ONTAP 9.6 and 9.7, name is not supported. We are
+ falling back to using common_name and type, but uniqueness is not guaranteed.
+ :return:
+ Dictionary if certificate with same name is found
+ None if not found
+ """
+ error = "'name' or ('common_name', 'type') are required."
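+ # Try a lookup by 'name' first, then fall back to 'common_name' (plus
+ # 'type') when 'name' is absent or rejected by ONTAP 9.6/9.7.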
+ for key in ('name', 'common_name'):
+ if self.parameters.get(key) is None:
+ continue
+ data = {'fields': 'uuid',
+ key: self.parameters[key],
+ }
+ if self.parameters.get('svm') is not None:
+ data['svm.name'] = self.parameters['svm']
+ else:
+ data['scope'] = 'cluster'
+ if key == 'common_name':
+ if self.parameters.get('type') is not None:
+ data['type'] = self.parameters['type']
+ else:
+ error = "When using 'common_name', 'type' is required."
+ break
+
+ api = "security/certificates"
+ message, error = self.rest_api.get(api, data)
+ if error:
+ try:
+ name_not_supported_error = (key == 'name') and (error['message'] == 'Unexpected argument "name".')
+ except (KeyError, TypeError):
+ name_not_supported_error = False
+ if name_not_supported_error:
+ if self.parameters['ignore_name_if_not_supported'] and self.parameters.get('common_name') is not None:
+ # let's attempt a retry using common_name
+ self.ignore_name_param = True
+ continue
+ error = "ONTAP 9.6 and 9.7 do not support 'name'. Use 'common_name' and 'type' as a work-around."
+ # report success, or any other error as is
+ break
+
+ if error:
+ self.module.fail_json(msg='Error calling API: %s - %s' % (api, error))
+
+ if len(message['records']) == 1:
+ return message['records'][0]
+ if len(message['records']) > 1:
+ error = 'Duplicate records with same common_name are preventing safe operations: %s' % repr(message)
+ self.module.fail_json(msg=error)
+ return None
+
+ def create_or_install_certificate(self):
+ """
+ Create or install certificate
+ :return: message (should be empty dict)
+ """
+ required_keys = ['type', 'common_name']
+ optional_keys = ['public_certificate', 'private_key', 'expiry_time', 'key_size', 'hash_function']
+ if not self.ignore_name_param:
+ optional_keys.append('name')
+ # special key: svm
+
+ if not set(required_keys).issubset(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error creating or installing certificate: one or more of the following options are missing: %s'
+ % (', '.join(required_keys)))
+
+ data = dict()
+ if self.parameters.get('svm') is not None:
+ data['svm'] = {'name': self.parameters['svm']}
+ for key in required_keys + optional_keys:
+ if self.parameters.get(key) is not None:
+ data[key] = self.parameters[key]
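+ # At this point 'data' is the POST body, for example (values are
+ # illustrative only):
+ #   {'svm': {'name': 'vs1'}, 'type': 'server', 'common_name': 'ansible.example.com',
+ #    'public_certificate': '-----BEGIN CERTIFICATE-----...'}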
+ api = "security/certificates"
+ message, error = self.rest_api.post(api, data)
+ if error:
+ if self.parameters.get('svm') is None and error.get('target') == 'uuid':
+ error['target'] = 'cluster'
+ if error.get('message') == 'duplicate entry':
+ error['message'] += '. Same certificate may already exist under a different name.'
+ self.module.fail_json(msg="Error creating or installing certificate: %s" % error)
+ return message
+
+ def sign_certificate(self, uuid):
+ """
+ sign certificate
+ :return: a dictionary with key "public_certificate"
+ """
+ api = "security/certificates/%s/sign" % uuid
+ data = {'signing_request': self.parameters['signing_request']}
+ optional_keys = ['expiry_time', 'hash_function']
+ for key in optional_keys:
+ if self.parameters.get(key) is not None:
+ data[key] = self.parameters[key]
+ message, error = self.rest_api.post(api, data)
+ if error:
+ self.module.fail_json(msg="Error signing certificate: %s" % error)
+ return message
+
+ def delete_certificate(self, uuid):
+ """
+ Delete certificate
+ :return: message (should be empty dict)
+ """
+ api = "security/certificates/%s" % uuid
+ message, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg="Error deleting certificate: %s" % error)
+ return message
+
+ def apply(self):
+ """
+ Apply action to create/install/sign/delete certificate
+ :return: None
+ """
+ # TODO: add telemetry for REST
+
+ current = self.get_certificate()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ message = None
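+ # 'signing_request' is a stand-alone action: it requires an existing
+ # certificate (found above) and cannot be combined with create, install,
+ # or delete, nor with state=absent.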
+ if self.parameters.get('signing_request') is not None:
+ error = None
+ if self.parameters['state'] == 'absent':
+ error = "'signing_request' is not supported with 'state' set to 'absent'"
+ elif current is None:
+ scope = 'cluster' if self.parameters.get('svm') is None else "svm: %s" % self.parameters.get('svm')
+ error = "signing certificate with name '%s' not found on %s" % (self.parameters.get('name'), scope)
+ elif cd_action is not None:
+ error = "'signing_request' is exclusive with other actions: create, install, delete"
+ if error is not None:
+ self.module.fail_json(msg=error)
+ self.na_helper.changed = True
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ message = self.create_or_install_certificate()
+ elif cd_action == 'delete':
+ message = self.delete_certificate(current['uuid'])
+ elif self.parameters.get('signing_request') is not None:
+ message = self.sign_certificate(current['uuid'])
+
+ results = {'changed': self.na_helper.changed}
+ if message:
+ results['ontap_info'] = message
+ self.module.exit_json(**results)
+
+
+def main():
+ """
+ Create instance and invoke apply
+ :return: None
+ """
+ sec_cert = NetAppOntapSecurityCertificates()
+ sec_cert.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py
new file mode 100644
index 00000000..26e3b5e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_security_key_manager
+
+short_description: NetApp ONTAP security key manager.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Add, delete, or set up key management on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified key manager should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ ip_address:
+ description:
+ - The IP address of the key management server.
+ required: true
+ type: str
+
+ tcp_port:
+ description:
+ - The TCP port on which the key management server listens for incoming connections.
+ default: 5696
+ type: int
+
+ node:
+ description:
+ - The node on which the key management server runs.
+ type: str
+
+'''
+
+EXAMPLES = """
+
+ - name: Delete Key Manager
+ tags:
+ - delete
+ na_ontap_security_key_manager:
+ state: absent
+ node: swenjun-vsim1
+ hostname: "{{ hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ ip_address: 0.0.0.0
+
+ - name: Add Key Manager
+ tags:
+ - add
+ na_ontap_security_key_manager:
+ state: present
+ node: swenjun-vsim1
+ hostname: "{{ hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ ip_address: 0.0.0.0
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSecurityKeyManager(object):
+ '''class with key manager operations'''
+
+ def __init__(self):
+ '''Initialize module parameters'''
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ ip_address=dict(required=True, type='str'),
+ node=dict(required=False, type='str'),
+ tcp_port=dict(required=False, type='int', default=5696)
+ )
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required"
+ )
+ else:
+ self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_key_manager(self):
+ """
+ get key manager by ip address.
+ :return: a dict of key manager
+ """
+ key_manager_info = netapp_utils.zapi.NaElement('security-key-manager-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'key-manager-info', **{'key-manager-ip-address': self.parameters['ip_address']})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ key_manager_info.add_child_elem(query)
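+ # The assembled ZAPI request is roughly equivalent to (illustrative):
+ #   <security-key-manager-get-iter>
+ #     <query>
+ #       <key-manager-info>
+ #         <key-manager-ip-address>A.B.C.D</key-manager-ip-address>
+ #       </key-manager-info>
+ #     </query>
+ #   </security-key-manager-get-iter>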
+
+ try:
+ result = self.cluster.invoke_successfully(key_manager_info, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching key manager %s : %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return_value = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ key_manager = result.get_child_by_name('attributes-list').get_child_by_name('key-manager-info')
+ return_value = {}
+ if key_manager.get_child_by_name('key-manager-ip-address'):
+ return_value['ip_address'] = key_manager.get_child_content('key-manager-ip-address')
+ if key_manager.get_child_by_name('key-manager-server-status'):
+ return_value['server_status'] = key_manager.get_child_content('key-manager-server-status')
+ if key_manager.get_child_by_name('key-manager-tcp-port'):
+ return_value['tcp_port'] = key_manager.get_child_content('key-manager-tcp-port')
+ if key_manager.get_child_by_name('node-name'):
+ return_value['node'] = key_manager.get_child_content('node-name')
+
+ return return_value
+
+ def key_manager_setup(self):
+ """
+ set up external key manager.
+ """
+ key_manager_setup = netapp_utils.zapi.NaElement('security-key-manager-setup')
+ # If an on-boarding passphrase is specified, this is on-boarding key management.
+ # If not, this is external key management.
+ try:
+ self.cluster.invoke_successfully(key_manager_setup, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting up key manager %s : %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_key_manager(self):
+ """
+ add key manager.
+ """
+ key_manager_create = netapp_utils.zapi.NaElement('security-key-manager-add')
+ key_manager_create.add_new_child('key-manager-ip-address', self.parameters['ip_address'])
+ if self.parameters.get('tcp_port'):
+ key_manager_create.add_new_child('key-manager-tcp-port', str(self.parameters['tcp_port']))
+ try:
+ self.cluster.invoke_successfully(key_manager_create, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating key manager %s : %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_key_manager(self):
+ """
+ delete key manager.
+ """
+ key_manager_delete = netapp_utils.zapi.NaElement('security-key-manager-delete')
+ key_manager_delete.add_new_child('key-manager-ip-address', self.parameters['ip_address'])
+ try:
+ self.cluster.invoke_successfully(key_manager_delete, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting key manager %s : %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_security_key_manager")
+ self.key_manager_setup()
+ current = self.get_key_manager()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_key_manager()
+ elif cd_action == 'delete':
+ self.delete_key_manager()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.cluster)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ '''Apply volume operations from playbook'''
+ obj = NetAppOntapSecurityKeyManager()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py
new file mode 100644
index 00000000..63de30cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_service_processor_network
+short_description: NetApp ONTAP service processor network
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify an ONTAP service processor network.
+options:
+ state:
+ description:
+ - Whether the specified service processor network should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+ address_type:
+ description:
+ - Specify address class.
+ required: true
+ type: str
+ choices: ['ipv4', 'ipv6']
+ is_enabled:
+ description:
+ - Specify whether to enable or disable the service processor network.
+ required: true
+ type: bool
+ node:
+ description:
+ - The node where the service processor network should be enabled.
+ required: true
+ type: str
+ dhcp:
+ description:
+ - Specify dhcp type.
+ type: str
+ choices: ['v4', 'none']
+ gateway_ip_address:
+ description:
+ - Specify the gateway ip.
+ type: str
+ ip_address:
+ description:
+ - Specify the service processor ip address.
+ type: str
+ netmask:
+ description:
+ - Specify the service processor netmask.
+ type: str
+ prefix_length:
+ description:
+ - Specify the service processor prefix_length.
+ type: int
+ wait_for_completion:
+ description:
+ - Set this parameter to 'true' for synchronous execution (wait until the SP status is successfully updated).
+ - Set this parameter to 'false' for asynchronous execution.
+ - With asynchronous execution, the module returns as soon as the request is sent, without checking the SP status.
+ type: bool
+ default: false
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+ - name: Modify Service Processor Network
+ na_ontap_service_processor_network:
+ state: present
+ address_type: ipv4
+ is_enabled: true
+ dhcp: v4
+ node: "{{ netapp_node }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import time
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapServiceProcessorNetwork(object):
+ """
+ Modify a Service Processor Network
+ """
+
+ def __init__(self):
+ """
+ Initialize the NetAppOntapServiceProcessorNetwork class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ address_type=dict(required=True, type='str', choices=['ipv4', 'ipv6']),
+ is_enabled=dict(required=True, type='bool'),
+ node=dict(required=True, type='str'),
+ dhcp=dict(required=False, type='str', choices=['v4', 'none']),
+ gateway_ip_address=dict(required=False, type='str'),
+ ip_address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ prefix_length=dict(required=False, type='int'),
+ wait_for_completion=dict(required=False, type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=None)
+ return
+
+ def set_playbook_zapi_key_map(self):
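+ # Map playbook option names to ZAPI element names, grouped by value type
+ # so get_service_processor_network() and modify_service_processor_network()
+ # can convert them generically.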
+ self.na_helper.zapi_string_keys = {
+ 'address_type': 'address-type',
+ 'node': 'node',
+ 'dhcp': 'dhcp',
+ 'gateway_ip_address': 'gateway-ip-address',
+ 'ip_address': 'ip-address',
+ 'netmask': 'netmask'
+ }
+ self.na_helper.zapi_int_keys = {
+ 'prefix_length': 'prefix-length'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'is_enabled': 'is-enabled',
+ }
+ self.na_helper.zapi_required = {
+ 'address_type': 'address-type',
+ 'node': 'node',
+ 'is_enabled': 'is-enabled'
+ }
+
+ def get_sp_network_status(self):
+ """
+ Return status of service processor network
+ :param:
+ name : name of the node
+ :return: Status of the service processor network
+ :rtype: dict
+ """
+ spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter')
+ query_info = {
+ 'query': {
+ 'service-processor-network-info': {
+ 'node': self.parameters['node'],
+ 'address-type': self.parameters['address_type']
+ }
+ }
+ }
+ spn_get_iter.translate_struct(query_info)
+ result = self.server.invoke_successfully(spn_get_iter, True)
+ if int(result['num-records']) >= 1:
+ sp_attr_info = result['attributes-list']['service-processor-network-info']
+ return sp_attr_info.get_child_content('setup-status')
+ return None
+
+ def get_service_processor_network(self):
+ """
+ Return details about service processor network
+ :param:
+ name : name of the node
+ :return: Details about service processor network. None if not found.
+ :rtype: dict
+ """
+ spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter')
+ query_info = {
+ 'query': {
+ 'service-processor-network-info': {
+ 'node': self.parameters['node']
+ }
+ }
+ }
+ spn_get_iter.translate_struct(query_info)
+ result = self.server.invoke_successfully(spn_get_iter, True)
+ sp_details = None
+ # check if a matching service processor network record exists
+ if int(result['num-records']) >= 1:
+ sp_details = dict()
+ sp_attr_info = result['attributes-list']['service-processor-network-info']
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ sp_details[item_key] = sp_attr_info.get_child_content(zapi_key)
+ for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+ sp_details[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+ value=sp_attr_info.get_child_content(zapi_key))
+ for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+ sp_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=sp_attr_info.get_child_content(zapi_key))
+ return sp_details
+
+ def modify_service_processor_network(self, params=None):
+ """
+ Modify a service processor network.
+ :param params: A dict of modified options.
+ When dhcp is not set to v4, ip_address, netmask, and gateway_ip_address must be specified even if they remain the same.
+ """
+ if self.parameters['is_enabled'] is False:
+ if params.get('is_enabled') and len(params) > 1:
+ self.module.fail_json(msg='Error: Cannot modify any other parameter for a service processor network if option "is_enabled" is set to false.')
+ elif params.get('is_enabled') is None and len(params) > 0:
+ self.module.fail_json(msg='Error: Cannot modify a service processor network if it is disabled.')
+
+ sp_modify = netapp_utils.zapi.NaElement('service-processor-network-modify')
+ sp_modify.add_new_child("node", self.parameters['node'])
+ sp_modify.add_new_child("address-type", self.parameters['address_type'])
+ sp_attributes = dict()
+ for item_key in self.parameters:
+ if item_key in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(item_key)
+ sp_attributes[zapi_key] = self.parameters[item_key]
+ elif item_key in self.na_helper.zapi_bool_keys:
+ zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
+ sp_attributes[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters[item_key])
+ elif item_key in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(item_key)
+ sp_attributes[zapi_key] = self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters[item_key])
+ sp_modify.translate_struct(sp_attributes)
+ try:
+ self.server.invoke_successfully(sp_modify, enable_tunneling=True)
+ if self.parameters.get('wait_for_completion'):
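+ # Poll the SP setup status every 10 seconds, up to 10 times (about 100
+ # seconds), until it leaves 'in_progress' or the retries run out.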
+ retries = 10
+ while self.get_sp_network_status() == 'in_progress' and retries > 0:
+ time.sleep(10)
+ retries = retries - 1
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying service processor network: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_service_processor_network", cserver)
+
+ def apply(self):
+ """
+ Run Module based on play book
+ """
+ self.autosupport_log()
+ current = self.get_service_processor_network()
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if not current:
+ self.module.fail_json(msg='Error No Service Processor for node: %s' % self.parameters['node'])
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.modify_service_processor_network(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Create the NetApp Ontap Service Processor Network Object and modify it
+ """
+
+ obj = NetAppOntapServiceProcessorNetwork()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py
new file mode 100644
index 00000000..00338142
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py
@@ -0,0 +1,895 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Update/Initialize/Break/Resync/Resume SnapMirror volume/vserver relationships for ONTAP/ONTAP
+ - Create/Delete/Update/Initialize SnapMirror volume relationship between ElementSW and ONTAP
+ - Modify schedule for a SnapMirror relationship for ONTAP/ONTAP and ElementSW/ONTAP
+ - A pre-requisite for an ElementSW to ONTAP relationship (or vice versa) is an established SnapMirror endpoint for the ONTAP cluster in the ElementSW UI.
+ - A pre-requisite for an ElementSW to ONTAP relationship (or vice versa) is to have SnapMirror enabled on the ElementSW volume.
+ - For creating a SnapMirror ElementSW/ONTAP relationship, an existing ONTAP/ElementSW relationship should be present.
+ - Performs resync if the C(relationship_state=active) and the current mirror state of the snapmirror relationship is broken-off
+ - Performs resume if the C(relationship_state=active), the current snapmirror relationship status is quiesced and mirror state is snapmirrored
+ - Performs restore if the C(relationship_type=restore) and all other operations will not be performed during this task
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_snapmirror
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified relationship should exist or not.
+ default: present
+ type: str
+ source_volume:
+ description:
+ - Specifies the name of the source volume for the SnapMirror.
+ type: str
+ destination_volume:
+ description:
+ - Specifies the name of the destination volume for the SnapMirror.
+ type: str
+ source_vserver:
+ description:
+ - Name of the source vserver for the SnapMirror.
+ type: str
+ destination_vserver:
+ description:
+ - Name of the destination vserver for the SnapMirror.
+ type: str
+ source_path:
+ description:
+ - Specifies the source endpoint of the SnapMirror relationship.
+ - If the source is an ONTAP volume, format should be <[vserver:][volume]> or <[[cluster:]//vserver/]volume>
+ - If the source is an ElementSW volume, format should be <[Element_SVIP]:/lun/[Element_VOLUME_ID]>
+ - If the source is an ElementSW volume, the volume should have SnapMirror enabled.
+ type: str
+ destination_path:
+ description:
+ - Specifies the destination endpoint of the SnapMirror relationship.
+ type: str
+ relationship_type:
+ choices: ['data_protection', 'load_sharing', 'vault', 'restore', 'transition_data_protection',
+ 'extended_data_protection']
+ type: str
+ description:
+ - Specify the type of SnapMirror relationship.
+ - For 'restore', unless 'source_snapshot' is specified, the most recent Snapshot copy on the source volume is restored.
+ - Restore SnapMirror is not idempotent.
+ schedule:
+ description:
+ - Specify the name of the current schedule, which is used to update the SnapMirror relationship.
+ - Optional for create, modifiable.
+ type: str
+ policy:
+ description:
+ - Specify the name of the SnapMirror policy that applies to this relationship.
+ version_added: 2.8.0
+ type: str
+ source_hostname:
+ description:
+ - Source hostname or management IP address for ONTAP or ElementSW cluster.
+ - Required for SnapMirror delete
+ type: str
+ source_username:
+ description:
+ - Source username for ONTAP or ElementSW cluster.
+ - Optional if this is same as destination username.
+ type: str
+ source_password:
+ description:
+ - Source password for ONTAP or ElementSW cluster.
+ - Optional if this is same as destination password.
+ type: str
+ connection_type:
+ description:
+ - Type of SnapMirror relationship.
+ - A pre-requisite for either elementsw_ontap or ontap_elementsw is that the ElementSW volume should have the enableSnapmirror option set to true.
+ - To use ontap_elementsw, an elementsw_ontap SnapMirror relationship should already exist.
+ choices: ['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw']
+ default: ontap_ontap
+ type: str
+ version_added: 2.9.0
+ max_transfer_rate:
+ description:
+ - Specifies the upper bound, in kilobytes per second, at which data is transferred.
+ - Default is unlimited, it can be explicitly set to 0 as unlimited.
+ type: int
+ version_added: 2.9.0
+ initialize:
+ description:
+ - Specifies whether to initialize SnapMirror relation.
+ - Default is True, it can be explicitly set to False to avoid initializing SnapMirror relation.
+ default: true
+ type: bool
+ version_added: '19.11.0'
+ update:
+ description:
+ - Specifies whether to update the destination endpoint of the SnapMirror relationship only if the relationship is already present and active.
+ - Default is True.
+ default: true
+ type: bool
+ version_added: '20.2.0'
+ relationship_info_only:
+ description:
+ - If relationship-info-only is set to true then only relationship information is removed.
+ default: false
+ type: bool
+ version_added: '20.4.0'
+ relationship_state:
+ description:
+ - Specifies whether to break SnapMirror relation or establish a SnapMirror relationship.
+ - state must be present to use this option.
+ default: active
+ choices: ['active', 'broken']
+ type: str
+ version_added: '20.2.0'
+ source_snapshot:
+ description:
+ - Specifies the Snapshot from the source to be restored.
+ type: str
+ version_added: '20.6.0'
+ identity_preserve:
+ description:
+ - Specifies whether or not the identity of the source Vserver is replicated to the destination Vserver.
+ - If this parameter is set to true, the source Vserver's configuration will additionally be replicated to the destination.
+ - If the parameter is set to false, then only the source Vserver's volumes and RBAC configuration are replicated to the destination.
+ type: bool
+ version_added: 2.9.0
+short_description: "NetApp ONTAP or ElementSW Manage SnapMirror"
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ # creates and initializes the snapmirror
+ - name: Create ONTAP/ONTAP SnapMirror
+ na_ontap_snapmirror:
+ state: present
+ source_volume: test_src
+ destination_volume: test_dest
+ source_vserver: ansible_src
+ destination_vserver: ansible_dest
+ schedule: hourly
+ policy: MirrorAllSnapshots
+ max_transfer_rate: 1000
+ initialize: False
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ # creates and initializes the snapmirror between vservers
+ - name: Create ONTAP/ONTAP vserver SnapMirror
+ na_ontap_snapmirror:
+ state: present
+ source_vserver: ansible_src
+ destination_vserver: ansible_dest
+ identity_preserve: true
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ # existing snapmirror relation with status 'snapmirrored' will be initialized
+ - name: Initialize ONTAP/ONTAP SnapMirror
+ na_ontap_snapmirror:
+ state: present
+ source_path: 'ansible:test'
+ destination_path: 'ansible:dest'
+ relationship_state: active
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Delete SnapMirror
+ na_ontap_snapmirror:
+ state: absent
+ destination_path: <path>
+ relationship_info_only: True
+ source_hostname: "{{ source_hostname }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Break Snapmirror
+ na_ontap_snapmirror:
+ state: present
+ relationship_state: broken
+ destination_path: <path>
+ source_hostname: "{{ source_hostname }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Restore Snapmirror volume using location (Idempotency)
+ na_ontap_snapmirror:
+ state: present
+ source_path: <path>
+ destination_path: <path>
+ relationship_type: restore
+ source_snapshot: "{{ snapshot }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Set schedule to NULL
+ na_ontap_snapmirror:
+ state: present
+ destination_path: <path>
+ schedule: ""
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Create SnapMirror from ElementSW to ONTAP
+ na_ontap_snapmirror:
+ state: present
+ connection_type: elementsw_ontap
+ source_path: '10.10.10.10:/lun/300'
+ destination_path: 'ansible_test:ansible_dest_vol'
+ schedule: hourly
+ policy: MirrorLatest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ source_hostname: " {{ Element_cluster_mvip }}"
+ source_username: "{{ Element_cluster_username }}"
+ source_password: "{{ Element_cluster_password }}"
+
+ - name: Create SnapMirror from ONTAP to ElementSW
+ na_ontap_snapmirror:
+ state: present
+ connection_type: ontap_elementsw
+ destination_path: '10.10.10.10:/lun/300'
+ source_path: 'ansible_test:ansible_dest_vol'
+ policy: MirrorLatest
+ hostname: "{{ Element_cluster_mvip }}"
+ username: "{{ Element_cluster_username }}"
+ password: "{{ Element_cluster_password }}"
+ source_hostname: " {{ netapp_hostname }}"
+ source_username: "{{ netapp_username }}"
+ source_password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import re
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class NetAppONTAPSnapmirror(object):
+ """
+ Class with Snapmirror methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ source_vserver=dict(required=False, type='str'),
+ destination_vserver=dict(required=False, type='str'),
+ source_volume=dict(required=False, type='str'),
+ destination_volume=dict(required=False, type='str'),
+ source_path=dict(required=False, type='str'),
+ destination_path=dict(required=False, type='str'),
+ schedule=dict(required=False, type='str'),
+ policy=dict(required=False, type='str'),
+ relationship_type=dict(required=False, type='str',
+ choices=['data_protection', 'load_sharing',
+ 'vault', 'restore',
+ 'transition_data_protection',
+ 'extended_data_protection']
+ ),
+ source_hostname=dict(required=False, type='str'),
+ connection_type=dict(required=False, type='str',
+ choices=['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw'],
+ default='ontap_ontap'),
+ source_username=dict(required=False, type='str'),
+ source_password=dict(required=False, type='str', no_log=True),
+ max_transfer_rate=dict(required=False, type='int'),
+ initialize=dict(required=False, type='bool', default=True),
+ update=dict(required=False, type='bool', default=True),
+ identity_preserve=dict(required=False, type='bool'),
+ relationship_state=dict(required=False, type='str', choices=['active', 'broken'], default='active'),
+ relationship_info_only=dict(required=False, type='bool', default=False),
+ source_snapshot=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=(['source_volume', 'destination_volume'],
+ ['source_vserver', 'destination_vserver']),
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # setup later if required
+ self.source_server = None
+ # only for ElementSW -> ONTAP snapmirroring, validate if ElementSW SDK is available
+ if self.parameters.get('connection_type') in ['elementsw_ontap', 'ontap_elementsw']:
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ if self.parameters.get('connection_type') != 'ontap_elementsw':
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ else:
+ if self.parameters.get('source_username'):
+ self.module.params['username'] = self.parameters['source_username']
+ if self.parameters.get('source_password'):
+ self.module.params['password'] = self.parameters['source_password']
+ self.module.params['hostname'] = self.parameters['source_hostname']
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def set_element_connection(self, kind):
+ if kind == 'source':
+ self.module.params['hostname'] = self.parameters['source_hostname']
+ self.module.params['username'] = self.parameters['source_username']
+ self.module.params['password'] = self.parameters['source_password']
+ elif kind == 'destination':
+ self.module.params['hostname'] = self.parameters['hostname']
+ self.module.params['username'] = self.parameters['username']
+ self.module.params['password'] = self.parameters['password']
+ elem = netapp_utils.create_sf_connection(module=self.module)
+ elementsw_helper = NaElementSWModule(elem)
+ return elementsw_helper, elem
+
+ def snapmirror_get_iter(self, destination=None):
+ """
+ Compose NaElement object to query current SnapMirror relations using destination-path
+ SnapMirror relation for a destination path is unique
+ :return: NaElement object for SnapMirror-get-iter
+ """
+ snapmirror_get_iter = netapp_utils.zapi.NaElement('snapmirror-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ snapmirror_info = netapp_utils.zapi.NaElement('snapmirror-info')
+ if destination is None:
+ destination = self.parameters['destination_path']
+ snapmirror_info.add_new_child('destination-location', destination)
+ query.add_child_elem(snapmirror_info)
+ snapmirror_get_iter.add_child_elem(query)
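+ # The resulting request is roughly equivalent to (illustrative):
+ #   <snapmirror-get-iter>
+ #     <query>
+ #       <snapmirror-info>
+ #         <destination-location>DESTINATION_PATH</destination-location>
+ #       </snapmirror-info>
+ #     </query>
+ #   </snapmirror-get-iter>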
+ return snapmirror_get_iter
+
+ def snapmirror_get(self, destination=None):
+ """
+ Get current SnapMirror relations
+ :return: Dictionary of current SnapMirror details if query successful, else None
+ """
+ snapmirror_get_iter = self.snapmirror_get_iter(destination)
+ snap_info = dict()
+ try:
+ result = self.server.invoke_successfully(snapmirror_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snapmirror info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ snapmirror_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'snapmirror-info')
+ snap_info['mirror_state'] = snapmirror_info.get_child_content('mirror-state')
+ snap_info['status'] = snapmirror_info.get_child_content('relationship-status')
+ snap_info['schedule'] = snapmirror_info.get_child_content('schedule')
+ snap_info['policy'] = snapmirror_info.get_child_content('policy')
+ snap_info['relationship'] = snapmirror_info.get_child_content('relationship-type')
+ if snapmirror_info.get_child_by_name('max-transfer-rate'):
+ snap_info['max_transfer_rate'] = int(snapmirror_info.get_child_content('max-transfer-rate'))
+ if snap_info['schedule'] is None:
+ snap_info['schedule'] = ""
+ return snap_info
+ return None
+
+ def check_if_remote_volume_exists(self):
+ """
+ Validate existence of source volume
+ :return: True if volume exists, False otherwise
+ """
+ self.set_source_cluster_connection()
+ # do a get volume to check if volume exists or not
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', self.parameters['source_volume'])
+ # if source_volume is present, then source_vserver is also guaranteed to be present
+ volume_id_attributes.add_new_child('vserver-name', self.parameters['source_vserver'])
+ volume_attributes.add_child_elem(volume_id_attributes)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+ volume_info.add_child_elem(query)
+ try:
+ result = self.source_server.invoke_successfully(volume_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching source volume details %s : %s'
+ % (self.parameters['source_volume'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ return True
+ return False
+
+ def snapmirror_create(self):
+ """
+ Create a SnapMirror relationship
+ """
+ if self.parameters.get('source_hostname') and self.parameters.get('source_volume'):
+ if not self.check_if_remote_volume_exists():
+ self.module.fail_json(msg='Source volume does not exist. Please specify a volume that exists')
+ options = {'source-location': self.parameters['source_path'],
+ 'destination-location': self.parameters['destination_path']}
+ snapmirror_create = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-create', **options)
+ if self.parameters.get('relationship_type'):
+ snapmirror_create.add_new_child('relationship-type', self.parameters['relationship_type'])
+ if self.parameters.get('schedule'):
+ snapmirror_create.add_new_child('schedule', self.parameters['schedule'])
+ if self.parameters.get('policy'):
+ snapmirror_create.add_new_child('policy', self.parameters['policy'])
+ if self.parameters.get('max_transfer_rate'):
+ snapmirror_create.add_new_child('max-transfer-rate', str(self.parameters['max_transfer_rate']))
+ if self.parameters.get('identity_preserve'):
+ snapmirror_create.add_new_child('identity-preserve', str(self.parameters['identity_preserve']))
+ try:
+ self.server.invoke_successfully(snapmirror_create, enable_tunneling=True)
+ if self.parameters['initialize']:
+ self.snapmirror_initialize()
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating SnapMirror %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ def set_source_cluster_connection(self):
+ """
+ Set up an ONTAP ZAPI server connection for the source hostname
+ :return: None
+ """
+ if self.parameters.get('source_username'):
+ self.module.params['username'] = self.parameters['source_username']
+ if self.parameters.get('source_password'):
+ self.module.params['password'] = self.parameters['source_password']
+ self.module.params['hostname'] = self.parameters['source_hostname']
+ self.source_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def delete_snapmirror(self, is_hci, relationship_type, mirror_state):
+ """
+ Delete a SnapMirror relationship
+ #1. Quiesce the SnapMirror relationship at destination
+ #2. Break the SnapMirror relationship at the destination
+ #3. Release the SnapMirror at source
+ #4. Delete SnapMirror at destination
+ """
+ if not is_hci:
+ if not self.parameters.get('source_hostname'):
+ self.module.fail_json(msg='Missing parameters for delete: Please specify the '
+ 'source cluster hostname to release the SnapMirror relationship')
+ # Quiesce and Break at destination
+ if relationship_type not in ['load_sharing', 'vault'] and mirror_state not in ['uninitialized', 'broken-off']:
+ self.snapmirror_break()
+ # if source is ONTAP, release the destination at source cluster
+ if not is_hci:
+ self.set_source_cluster_connection()
+ if self.get_destination():
+ # Release at source
+ self.snapmirror_release()
+ # Delete at destination
+ self.snapmirror_delete()
+
+ def snapmirror_quiesce(self):
+ """
+ Quiesce SnapMirror relationship - disable all future transfers to this destination
+ """
+ result = None
+ options = {'destination-location': self.parameters['destination_path']}
+
+ snapmirror_quiesce = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-quiesce', **options)
+ try:
+ result = self.server.invoke_successfully(snapmirror_quiesce, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Quiescing SnapMirror : %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ # checking if quiesce was passed successfully
+ if result is not None and result['status'] == 'passed':
+ return
+ elif result is not None and result['status'] != 'passed':
+ retries = 5
+ while retries > 0:
+ time.sleep(5)
+ retries = retries - 1
+ status = self.snapmirror_get()
+ if status['status'] == 'quiesced':
+ return
+ if retries == 0:
+ self.module.fail_json(msg='Taking a long time to quiesce SnapMirror, try again later')
+
+ def snapmirror_delete(self):
+ """
+ Delete SnapMirror relationship at destination cluster
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+
+ snapmirror_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-destroy', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_break(self, destination=None):
+ """
+ Break SnapMirror relationship at destination cluster
+ #1. Quiesce the SnapMirror relationship at destination
+ #2. Break the SnapMirror relationship at the destination
+ """
+ self.snapmirror_quiesce()
+ if destination is None:
+ destination = self.parameters['destination_path']
+ options = {'destination-location': destination}
+ snapmirror_break = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-break', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_break,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error breaking SnapMirror relationship : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_release(self):
+ """
+ Release SnapMirror relationship from source cluster
+ """
+ options = {'destination-location': self.parameters['destination_path'],
+ 'relationship-info-only': self.na_helper.get_value_for_bool(False, self.parameters['relationship_info_only'])}
+ snapmirror_release = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-release', **options)
+ try:
+ self.source_server.invoke_successfully(snapmirror_release,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error releasing SnapMirror relationship : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_abort(self):
+ """
+ Abort a SnapMirror relationship in progress
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_abort = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-abort', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_abort,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error aborting SnapMirror relationship : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_initialize(self):
+ """
+ Initialize SnapMirror based on relationship state
+ """
+ current = self.snapmirror_get()
+ if current['mirror_state'] != 'snapmirrored':
+ initialize_zapi = 'snapmirror-initialize'
+ if self.parameters.get('relationship_type') and self.parameters['relationship_type'] == 'load_sharing':
+ initialize_zapi = 'snapmirror-initialize-ls-set'
+ options = {'source-location': self.parameters['source_path']}
+ else:
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_init = netapp_utils.zapi.NaElement.create_node_with_children(
+ initialize_zapi, **options)
+ try:
+ self.server.invoke_successfully(snapmirror_init,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error initializing SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_resync(self):
+ """
+ resync SnapMirror based on relationship state
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_resync = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resync', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_resync, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error resyncing SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_resume(self):
+ """
+ resume SnapMirror based on relationship state
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_resume = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resume', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_resume, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error resuming SnapMirror : %s' % (to_native(error)), exception=traceback.format_exc())
+
+ def snapmirror_restore(self):
+ """
+ restore SnapMirror based on relationship state
+ """
+ options = {'destination-location': self.parameters['destination_path'],
+ 'source-location': self.parameters['source_path']}
+ if self.parameters.get('source_snapshot'):
+ options['source-snapshot'] = self.parameters['source_snapshot']
+ snapmirror_restore = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-restore', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_restore, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error restoring SnapMirror : %s' % (to_native(error)), exception=traceback.format_exc())
+
+ def snapmirror_modify(self, modify):
+ """
+ Modify SnapMirror schedule or policy
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-modify', **options)
+ if modify.get('schedule') is not None:
+ snapmirror_modify.add_new_child('schedule', modify.get('schedule'))
+ if modify.get('policy'):
+ snapmirror_modify.add_new_child('policy', modify.get('policy'))
+ if modify.get('max_transfer_rate'):
+ snapmirror_modify.add_new_child('max-transfer-rate', str(modify.get('max_transfer_rate')))
+ try:
+ self.server.invoke_successfully(snapmirror_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying SnapMirror schedule or policy : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_update(self):
+ """
+ Update data in destination endpoint
+ """
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_update = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-update', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_update, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating SnapMirror : %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def check_parameters(self):
+ """
+ Validate parameters and fail if one or more required params are missing
+ Update source and destination path from vserver and volume parameters
+ """
+ if self.parameters['state'] == 'present'\
+ and (self.parameters.get('source_path') or self.parameters.get('destination_path')):
+ if not self.parameters.get('destination_path') or not self.parameters.get('source_path'):
+ self.module.fail_json(msg='Missing parameters: Source path or Destination path')
+ elif self.parameters.get('source_volume'):
+ if not self.parameters.get('source_vserver') or not self.parameters.get('destination_vserver'):
+ self.module.fail_json(msg='Missing parameters: source vserver or destination vserver or both')
+ self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['source_volume']
+ self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" +\
+ self.parameters['destination_volume']
+ elif self.parameters.get('source_vserver'):
+ self.parameters['source_path'] = self.parameters['source_vserver'] + ":"
+ self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":"
+
+ def get_destination(self):
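+ # Query the source cluster with snapmirror-get-destination-iter; return True if a
+ # destination entry exists for destination_path, None otherwise.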
+ result = None
+ release_get = netapp_utils.zapi.NaElement('snapmirror-get-destination-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ snapmirror_dest_info = netapp_utils.zapi.NaElement('snapmirror-destination-info')
+ snapmirror_dest_info.add_new_child('destination-location', self.parameters['destination_path'])
+ query.add_child_elem(snapmirror_dest_info)
+ release_get.add_child_elem(query)
+ try:
+ result = self.source_server.invoke_successfully(release_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snapmirror destinations info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ return True
+ return None
+
+ @staticmethod
+ def element_source_path_format_matches(value):
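+ # Matches ElementSW-style paths of the form <SVIP>:/lun/<volume id>,
+ # e.g. '10.10.10.10:/lun/42' (hypothetical value, for illustration).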
+ return re.match(pattern=r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\/lun\/[0-9]+",
+ string=value)
+
+ def check_elementsw_parameters(self, kind='source'):
+ """
+ Validate all ElementSW cluster parameters required for managing the SnapMirror relationship
+ Validate if both source and destination paths are present
+ Validate if source_path follows the required format
+ Validate SVIP
+ Validate if ElementSW volume exists
+ :return: None
+ """
+ path = None
+ if kind == 'destination':
+ path = self.parameters.get('destination_path')
+ elif kind == 'source':
+ path = self.parameters.get('source_path')
+ if path is None:
+ self.module.fail_json(msg="Error: Missing required parameter %s_path for "
+ "connection_type %s" % (kind, self.parameters['connection_type']))
+ else:
+ if NetAppONTAPSnapmirror.element_source_path_format_matches(path) is None:
+ self.module.fail_json(msg="Error: invalid %s_path %s. "
+ "If the path is a ElementSW cluster, the value should be of the format"
+ " <Element_SVIP>:/lun/<Element_VOLUME_ID>" % (kind, path))
+ # validate source_path
+ elementsw_helper, elem = self.set_element_connection(kind)
+ self.validate_elementsw_svip(path, elem)
+ self.check_if_elementsw_volume_exists(path, elementsw_helper)
+
+ def validate_elementsw_svip(self, path, elem):
+ """
+ Validate ElementSW cluster SVIP
+ :return: None
+ """
+ result = None
+ try:
+ result = elem.get_cluster_info()
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error fetching SVIP", exception=to_native(err))
+ if result and result.cluster_info.svip:
+ cluster_svip = result.cluster_info.svip
+ svip = path.split(':')[0] # split IP address from source_path
+ if svip != cluster_svip:
+ self.module.fail_json(msg="Error: Invalid SVIP")
+
+ def check_if_elementsw_volume_exists(self, path, elementsw_helper):
+ """
+ Check if remote ElementSW volume exists
+ :return: None
+ """
+ volume_id, vol_id = None, path.split('/')[-1]
+ try:
+ volume_id = elementsw_helper.volume_id_exists(int(vol_id))
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error fetching Volume details", exception=to_native(err))
+
+ if volume_id is None:
+ self.module.fail_json(msg="Error: Source volume does not exist in the ElementSW cluster")
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create an AutoSupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ if results is None:
+ # We may be running on a vserver
+ try:
+ netapp_utils.ems_log_event(event_name, self.server)
+ except netapp_utils.zapi.NaApiError:
+ # Don't fail if we cannot log usage
+ pass
+ else:
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ """
+ Apply action to SnapMirror
+ """
+ self.asup_log_for_cserver("na_ontap_snapmirror")
+ # source is ElementSW
+ if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'elementsw_ontap':
+ self.check_elementsw_parameters()
+ elif self.parameters.get('connection_type') == 'ontap_elementsw':
+ self.check_elementsw_parameters('destination')
+ else:
+ self.check_parameters()
+ if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'ontap_elementsw':
+ current_elementsw_ontap = self.snapmirror_get(self.parameters['source_path'])
+ if current_elementsw_ontap is None:
+ self.module.fail_json(msg='Error: creating an ONTAP to ElementSW snapmirror relationship requires an '
+ 'established SnapMirror relationship from ElementSW to ONTAP cluster')
+ restore = self.parameters.get('relationship_type', '') == 'restore'
+ current = self.snapmirror_get() if not restore else None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters) if not restore else None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if not restore else None
+ element_snapmirror = False
+ if self.parameters['state'] == 'present' and restore:
+ self.na_helper.changed = True
+ if not self.module.check_mode:
+ self.snapmirror_restore()
+ elif cd_action == 'create':
+ if not self.module.check_mode:
+ self.snapmirror_create()
+ elif cd_action == 'delete':
+ if not self.module.check_mode:
+ if current['status'] == 'transferring':
+ self.snapmirror_abort()
+ else:
+ if self.parameters.get('connection_type') == 'elementsw_ontap':
+ element_snapmirror = True
+ self.delete_snapmirror(element_snapmirror, current['relationship'], current['mirror_state'])
+ else:
+ if modify:
+ if not self.module.check_mode:
+ self.snapmirror_modify(modify)
+ # break relationship when 'relationship_state' == 'broken'
+ if current and self.parameters['state'] == 'present' and self.parameters['relationship_state'] == 'broken':
+ if current['mirror_state'] == 'uninitialized':
+ self.module.fail_json(msg='SnapMirror relationship cannot be broken if mirror state is uninitialized')
+ elif current['relationship'] in ['load_sharing', 'vault']:
+ self.module.fail_json(msg='SnapMirror break is not allowed in a load_sharing or vault relationship')
+ elif current['mirror_state'] != 'broken-off':
+ if not self.module.check_mode:
+ self.snapmirror_break()
+ self.na_helper.changed = True
+ # check for initialize
+ elif current and self.parameters['initialize'] and self.parameters['relationship_state'] == 'active'\
+ and current['mirror_state'] == 'uninitialized':
+ if not self.module.check_mode:
+ self.snapmirror_initialize()
+ # set changed explicitly for initialize
+ self.na_helper.changed = True
+ if self.parameters['state'] == 'present' and self.parameters['relationship_state'] == 'active':
+ # resume when state is quiesced
+ if current['status'] == 'quiesced':
+ if not self.module.check_mode:
+ self.snapmirror_resume()
+ # set changed explicitly for resume
+ self.na_helper.changed = True
+ # resync when state is broken-off
+ if current['mirror_state'] == 'broken-off':
+ if not self.module.check_mode:
+ self.snapmirror_resync()
+ # set changed explicitly for resync
+ self.na_helper.changed = True
+ # Update when create is called again, or modify is being called
+ elif self.parameters['update']:
+ current = self.snapmirror_get()
+ if current['mirror_state'] == 'snapmirrored':
+ if not self.module.check_mode:
+ self.snapmirror_update()
+ self.na_helper.changed = True
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPSnapmirror()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py
new file mode 100644
index 00000000..94f3aeaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py
@@ -0,0 +1,837 @@
+#!/usr/bin/python
+
+# (c) 2019-2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_snapmirror_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_snapmirror_policy
+short_description: NetApp ONTAP create, delete or modify SnapMirror policies
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- NetApp ONTAP create, modify, or destroy the SnapMirror policy
+- Add, modify and remove SnapMirror policy rules
+- The following parameters are not supported in REST: 'owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime', 'common_snapshot_schedule'
+options:
+ state:
+ description:
+ - Whether the specified SnapMirror policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ description:
+ - Specifies the vserver for the SnapMirror policy.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Specifies the SnapMirror policy name.
+ required: true
+ type: str
+ policy_type:
+ description:
+ - Specifies the SnapMirror policy type. Modifying the type of an existing SnapMirror policy is not supported
+ choices: ['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror']
+ type: str
+ comment:
+ description:
+ - Specifies the SnapMirror policy comment.
+ type: str
+ tries:
+ description:
+ - Specifies the number of tries.
+ type: str
+ transfer_priority:
+ description:
+ - Specifies the priority at which a SnapMirror transfer runs.
+ choices: ['low', 'normal']
+ type: str
+ common_snapshot_schedule:
+ description:
+ - Specifies the common Snapshot copy schedule associated with the policy, only required for strict_sync_mirror and sync_mirror.
+ type: str
+ owner:
+ description:
+ - Specifies the owner of the SnapMirror policy.
+ choices: ['cluster_admin', 'vserver_admin']
+ type: str
+ is_network_compression_enabled:
+ description:
+ - Specifies whether network compression is enabled for transfers.
+ type: bool
+ ignore_atime:
+ description:
+ - Specifies whether incremental transfers will ignore files which have only their access time changed. Applies to SnapMirror vault relationships only.
+ type: bool
+ restart:
+ description:
+ - Defines the behavior of SnapMirror if an interrupted transfer exists, applies to data protection only.
+ choices: ['always', 'never', 'default']
+ type: str
+ snapmirror_label:
+ description:
+ - SnapMirror policy rule label.
+ - Required when defining policy rules.
+ - Use an empty list to remove all user-defined rules.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ keep:
+ description:
+ - SnapMirror policy rule retention count for snapshots created.
+ - Required when defining policy rules.
+ type: list
+ elements: int
+ version_added: '20.7.0'
+ prefix:
+ description:
+ - SnapMirror policy rule prefix.
+ - Optional when defining policy rules.
+ - Set to '' to leave the prefix unset or to remove an existing custom prefix.
+ - Prefix name should be unique within the policy.
+ - When specifying a custom prefix, schedule must also be specified.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ schedule:
+ description:
+ - SnapMirror policy rule schedule.
+ - Optional when defining policy rules.
+ - Set to '' to leave the schedule unset or to remove an existing schedule.
+ - When specifying a schedule, a custom prefix can be set; otherwise the prefix will be set to snapmirror_label.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+
+"""
+
+EXAMPLES = """
+ - name: Create SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ comment: "created by ansible"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "async_mirror"
+ transfer_priority: "low"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror policy with basic rules
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "async_mirror"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror policy with rules and schedules (no schedule for daily rule)
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ schedule: ['','weekly','monthly']
+ prefix: ['','','monthly_mv']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy with rules, remove existing schedules and prefixes
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ schedule: ['','','']
+ prefix: ['','','']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy, delete all rules (excludes builtin rules)
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: []
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Delete SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: absent
+ vserver: "SVM1"
+ policy_type: "async_mirror"
+ policy_name: "ansible_policy"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSnapMirrorPolicy(object):
+ """
+ Creates, modifies, and destroys a SnapMirror policy
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap SnapMirror policy class
+ """
+
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ comment=dict(required=False, type='str'),
+ policy_type=dict(required=False, type='str',
+ choices=['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror']),
+ tries=dict(required=False, type='str'),
+ transfer_priority=dict(required=False, type='str', choices=['low', 'normal']),
+ common_snapshot_schedule=dict(required=False, type='str'),
+ ignore_atime=dict(required=False, type='bool'),
+ is_network_compression_enabled=dict(required=False, type='bool'),
+ owner=dict(required=False, type='str', choices=['cluster_admin', 'vserver_admin']),
+ restart=dict(required=False, type='str', choices=['always', 'never', 'default']),
+ snapmirror_label=dict(required=False, type="list", elements="str"),
+ keep=dict(required=False, type="list", elements="int"),
+ prefix=dict(required=False, type="list", elements="str"),
+ schedule=dict(required=False, type="list", elements="str"),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower versions
+ self.rest_api = OntapRestAPI(self.module)
+ # some attributes are not supported in earlier REST implementation
+ unsupported_rest_properties = ['owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime',
+ 'common_snapshot_schedule']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+
+ if error:
+ self.module.fail_json(msg=error)
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_snapmirror_policy(self):
+
+ if self.use_rest:
+ data = {'fields': 'uuid,name,svm.name,comment,network_compression_enabled,type,retention',
+ 'name': self.parameters['policy_name'],
+ 'svm.name': self.parameters['vserver']}
+ api = "snapmirror/policies"
+ message, error = self.rest_api.get(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message['records']) != 0:
+ return_value = {
+ 'uuid': message['records'][0]['uuid'],
+ 'vserver': message['records'][0]['svm']['name'],
+ 'policy_name': message['records'][0]['name'],
+ 'comment': '',
+ 'is_network_compression_enabled': message['records'][0]['network_compression_enabled'],
+ 'snapmirror_label': list(),
+ 'keep': list(),
+ 'prefix': list(),
+ 'schedule': list()
+ }
+ if 'type' in message['records'][0]:
+ policy_type = message['records'][0]['type']
+ if policy_type == 'async':
+ policy_type = 'async_mirror'
+ elif policy_type == 'sync':
+ policy_type = 'sync_mirror'
+ return_value['policy_type'] = policy_type
+ if 'comment' in message['records'][0]:
+ return_value['comment'] = message['records'][0]['comment']
+ if 'retention' in message['records'][0]:
+ for rule in message['records'][0]['retention']:
+ return_value['snapmirror_label'].append(rule['label'])
+ return_value['keep'].append(int(rule['count']))
+ if rule['prefix'] == '-':
+ return_value['prefix'].append('')
+ else:
+ return_value['prefix'].append(rule['prefix'])
+ if rule['creation_schedule']['name'] == '-':
+ return_value['schedule'].append('')
+ else:
+ return_value['schedule'].append(rule['creation_schedule']['name'])
+ return return_value
+ return None
+ else:
+ return_value = None
+
+ snapmirror_policy_get_iter = netapp_utils.zapi.NaElement('snapmirror-policy-get-iter')
+ snapmirror_policy_info = netapp_utils.zapi.NaElement('snapmirror-policy-info')
+ snapmirror_policy_info.add_new_child('policy-name', self.parameters['policy_name'])
+ snapmirror_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(snapmirror_policy_info)
+ snapmirror_policy_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(snapmirror_policy_get_iter, True)
+ if result.get_child_by_name('attributes-list'):
+ snapmirror_policy_attributes = result['attributes-list']['snapmirror-policy-info']
+
+ return_value = {
+ 'policy_name': snapmirror_policy_attributes['policy-name'],
+ 'tries': snapmirror_policy_attributes['tries'],
+ 'transfer_priority': snapmirror_policy_attributes['transfer-priority'],
+ 'is_network_compression_enabled': self.na_helper.get_value_for_bool(True,
+ snapmirror_policy_attributes['is-network-compression-enabled']),
+ 'restart': snapmirror_policy_attributes['restart'],
+ 'ignore_atime': self.na_helper.get_value_for_bool(True, snapmirror_policy_attributes['ignore-atime']),
+ 'vserver': snapmirror_policy_attributes['vserver-name'],
+ 'comment': '',
+ 'snapmirror_label': list(),
+ 'keep': list(),
+ 'prefix': list(),
+ 'schedule': list()
+ }
+ if snapmirror_policy_attributes.get_child_content('comment') is not None:
+ return_value['comment'] = snapmirror_policy_attributes['comment']
+
+ if snapmirror_policy_attributes.get_child_content('type') is not None:
+ return_value['policy_type'] = snapmirror_policy_attributes['type']
+
+ if snapmirror_policy_attributes.get_child_by_name('snapmirror-policy-rules'):
+ for rule in snapmirror_policy_attributes['snapmirror-policy-rules'].get_children():
+ # Ignore builtin rules
+ if rule.get_child_content('snapmirror-label') == "sm_created" or \
+ rule.get_child_content('snapmirror-label') == "all_source_snapshots":
+ continue
+
+ return_value['snapmirror_label'].append(rule.get_child_content('snapmirror-label'))
+ return_value['keep'].append(int(rule.get_child_content('keep')))
+
+ prefix = rule.get_child_content('prefix')
+ if prefix is None or prefix == '-':
+ prefix = ''
+ return_value['prefix'].append(prefix)
+
+ schedule = rule.get_child_content('schedule')
+ if schedule is None or schedule == '-':
+ schedule = ''
+ return_value['schedule'].append(schedule)
+
+ except netapp_utils.zapi.NaApiError as error:
+ if 'NetApp API failed. Reason - 13001:' in to_native(error):
+ # Policy does not exist
+ pass
+ else:
+ self.module.fail_json(msg='Error getting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return return_value
+
+ def validate_parameters(self):
+ """
+ Validate snapmirror policy rules
+ :return: None
+ """
+
+ # For snapmirror policy rules, 'snapmirror_label' is required.
+ if 'snapmirror_label' in self.parameters:
+
+ # Check size of 'snapmirror_label' list is 0-10. Can have zero rules.
+ # Take builtin 'sm_created' rule into account for 'mirror_vault'.
+ if (('policy_type' in self.parameters and
+ self.parameters['policy_type'] == 'mirror_vault' and
+ len(self.parameters['snapmirror_label']) > 9) or
+ len(self.parameters['snapmirror_label']) > 10):
+ self.module.fail_json(msg="Error: A SnapMirror Policy can have up to a maximum of "
+ "10 rules (including builtin rules), with a 'keep' value "
+ "representing the maximum number of Snapshot copies for each rule")
+
+ # 'keep' must be supplied as long as there is at least one snapmirror_label
+ if len(self.parameters['snapmirror_label']) > 0 and 'keep' not in self.parameters:
+ self.module.fail_json(msg="Error: Missing 'keep' parameter. When specifying the "
+ "'snapmirror_label' parameter, the 'keep' parameter must "
+ "also be supplied")
+
+ # Make sure other rule values match same number of 'snapmirror_label' values.
+ for rule_parameter in ['keep', 'prefix', 'schedule']:
+ if rule_parameter in self.parameters:
+ if len(self.parameters['snapmirror_label']) > len(self.parameters[rule_parameter]):
+ self.module.fail_json(msg="Error: Each 'snapmirror_label' value must have "
+ "an accompanying '%s' value" % rule_parameter)
+ if len(self.parameters[rule_parameter]) > len(self.parameters['snapmirror_label']):
+ self.module.fail_json(msg="Error: Each '%s' value must have an accompanying "
+ "'snapmirror_label' value" % rule_parameter)
+ else:
+ # 'snapmirror_label' not supplied.
+ # Bail out if other rule parameters have been supplied.
+ for rule_parameter in ['keep', 'prefix', 'schedule']:
+ if rule_parameter in self.parameters:
+ self.module.fail_json(msg="Error: Missing 'snapmirror_label' parameter. When "
+ "specifying the '%s' parameter, the 'snapmirror_label' "
+ "parameter must also be supplied" % rule_parameter)
+
+ # Schedule must be supplied if prefix is supplied.
+ if 'prefix' in self.parameters and 'schedule' not in self.parameters:
+ self.module.fail_json(msg="Error: Missing 'schedule' parameter. When "
+ "specifying the 'prefix' parameter, the 'schedule' "
+ "parameter must also be supplied")
+
+ def create_snapmirror_policy(self):
+ """
+ Creates a new SnapMirror policy
+ """
+ self.validate_parameters()
+ if self.use_rest:
+ data = {'name': self.parameters['policy_name'],
+ 'svm': {'name': self.parameters['vserver']}}
+ if 'policy_type' in self.parameters.keys():
+ if 'async_mirror' in self.parameters['policy_type']:
+ data['type'] = 'async'
+ elif 'sync_mirror' in self.parameters['policy_type']:
+ data['type'] = 'sync'
+ data['sync_type'] = 'sync'
+ else:
+ self.module.fail_json(msg='policy type in REST only supports options async_mirror or sync_mirror, given %s'
+ % (self.parameters['policy_type']))
+ data = self.create_snapmirror_policy_obj_for_rest(data, data['type'])
+ else:
+ data = self.create_snapmirror_policy_obj_for_rest(data)
+ api = "snapmirror/policies"
+ response, error = self.rest_api.post(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'job' in response:
+ message, error = self.rest_api.wait_on_job(response['job'], increment=5)
+ if error:
+ self.module.fail_json(msg="%s" % error)
+ else:
+ snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-create")
+ snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+ if 'policy_type' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("type", self.parameters['policy_type'])
+ snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj)
+
+ try:
+ self.server.invoke_successfully(snapmirror_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_snapmirror_policy_obj(self, snapmirror_policy_obj):
+ if 'comment' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("comment", self.parameters['comment'])
+ if 'common_snapshot_schedule' in self.parameters.keys() and 'sync_mirror' in self.parameters['policy_type']:
+ snapmirror_policy_obj.add_new_child("common-snapshot-schedule", self.parameters['common_snapshot_schedule'])
+ if 'ignore_atime' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("ignore-atime", self.na_helper.get_value_for_bool(False, self.parameters['ignore_atime']))
+ if 'is_network_compression_enabled' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("is-network-compression-enabled",
+ self.na_helper.get_value_for_bool(False, self.parameters['is_network_compression_enabled']))
+ if 'owner' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("owner", self.parameters['owner'])
+ if 'restart' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("restart", self.parameters['restart'])
+ if 'transfer_priority' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("transfer-priority", self.parameters['transfer_priority'])
+ if 'tries' in self.parameters.keys():
+ snapmirror_policy_obj.add_new_child("tries", self.parameters['tries'])
+ return snapmirror_policy_obj
+
+ def create_snapmirror_policy_obj_for_rest(self, snapmirror_policy_obj, policy_type=None):
+ if 'comment' in self.parameters.keys():
+ snapmirror_policy_obj["comment"] = self.parameters['comment']
+ if 'is_network_compression_enabled' in self.parameters:
+ if policy_type == 'async':
+ snapmirror_policy_obj["network_compression_enabled"] = self.parameters['is_network_compression_enabled']
+ elif policy_type == 'sync':
+ self.module.fail_json(msg="Input parameter network_compression_enabled is not valid for SnapMirror policy type sync")
+ return snapmirror_policy_obj
+
+ def create_snapmirror_policy_retention_obj_for_rest(self, rules=None):
+ """
+ Create SnapMirror policy retention REST object.
+ :param list rules: e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ]
+ :return: List of retention REST objects.
+ e.g. [{'label': 'daily', 'count': 7, 'prefix': 'daily', 'creation_schedule': {'name': 'daily'}}, ... ]
+ """
+ snapmirror_policy_retention_objs = list()
+ if rules is not None:
+ for rule in rules:
+ retention = {'label': rule['snapmirror_label'], 'count': str(rule['keep'])}
+ if 'prefix' in rule and rule['prefix'] != '':
+ retention['prefix'] = rule['prefix']
+ if 'schedule' in rule and rule['schedule'] != '':
+ retention['creation_schedule'] = {'name': rule['schedule']}
+ snapmirror_policy_retention_objs.append(retention)
+ return snapmirror_policy_retention_objs
+
+ def delete_snapmirror_policy(self, uuid=None):
+ """
+ Deletes a snapmirror policy
+ """
+ if self.use_rest:
+ api = "snapmirror/policies/%s" % uuid
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-delete")
+ snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+
+ try:
+ self.server.invoke_successfully(snapmirror_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_snapmirror_policy(self, uuid=None, policy_type=None):
+ """
+ Modifies a snapmirror policy
+ """
+ if self.use_rest:
+ api = "snapmirror/policies/" + uuid
+ data = self.create_snapmirror_policy_obj_for_rest(dict(), policy_type)
+ dummy, error = self.rest_api.patch(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-modify")
+ snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj)
+ # Only modify snapmirror policy if a specific snapmirror policy attribute needs
+ # modifying. It may be that only snapmirror policy rules are being modified.
+ if snapmirror_policy_obj.get_children():
+ snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+
+ try:
+ self.server.invoke_successfully(snapmirror_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def identify_new_snapmirror_policy_rules(self, current=None):
+ """
+ Identify new rules that should be added.
+ :return: List of new rules to be added
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ new_rules = list()
+ if 'snapmirror_label' in self.parameters:
+ for snapmirror_label in self.parameters['snapmirror_label']:
+ snapmirror_label = snapmirror_label.strip()
+
+ # Construct new rule. prefix and schedule are optional.
+ snapmirror_label_index = self.parameters['snapmirror_label'].index(snapmirror_label)
+ rule = dict({
+ 'snapmirror_label': snapmirror_label,
+ 'keep': self.parameters['keep'][snapmirror_label_index]
+ })
+ if 'prefix' in self.parameters:
+ rule['prefix'] = self.parameters['prefix'][snapmirror_label_index]
+ else:
+ rule['prefix'] = ''
+ if 'schedule' in self.parameters:
+ rule['schedule'] = self.parameters['schedule'][snapmirror_label_index]
+ else:
+ rule['schedule'] = ''
+
+ if current is not None and 'snapmirror_label' in current:
+ if snapmirror_label not in current['snapmirror_label']:
+ # Rule doesn't exist. Add new rule.
+ new_rules.append(rule)
+ else:
+ # No current or any rules. Add new rule.
+ new_rules.append(rule)
+ return new_rules
+
+ def identify_obsolete_snapmirror_policy_rules(self, current=None):
+ """
+ Identify existing rules that should be deleted
+ :return: List of rules to be deleted
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ obsolete_rules = list()
+ if 'snapmirror_label' in self.parameters:
+ if current is not None and 'snapmirror_label' in current:
+ # Iterate existing rules.
+ for snapmirror_label in current['snapmirror_label']:
+ snapmirror_label = snapmirror_label.strip()
+ if snapmirror_label not in [item.strip() for item in self.parameters['snapmirror_label']]:
+ # Existing rule isn't in parameters. Delete existing rule.
+ current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label)
+ rule = dict({
+ 'snapmirror_label': snapmirror_label,
+ 'keep': current['keep'][current_snapmirror_label_index],
+ 'prefix': current['prefix'][current_snapmirror_label_index],
+ 'schedule': current['schedule'][current_snapmirror_label_index]
+ })
+ obsolete_rules.append(rule)
+ return obsolete_rules
+
+ def identify_modified_snapmirror_policy_rules(self, current=None):
+ """
+ Identify self.parameters rules that will be modified or not.
+ :return: List of 'modified' rules and a list of 'unmodified' rules
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ modified_rules = list()
+ unmodified_rules = list()
+ if 'snapmirror_label' in self.parameters:
+ for snapmirror_label in self.parameters['snapmirror_label']:
+ snapmirror_label = snapmirror_label.strip()
+ if current is not None and 'snapmirror_label' in current:
+ if snapmirror_label in current['snapmirror_label']:
+ # Rule exists. Identify whether it requires modification or not.
+ modified = False
+ rule = dict()
+ rule['snapmirror_label'] = snapmirror_label
+
+ # Get indexes of current and supplied rule.
+ current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label)
+ snapmirror_label_index = self.parameters['snapmirror_label'].index(snapmirror_label)
+
+ # Check if keep modified
+ if self.parameters['keep'][snapmirror_label_index] != current['keep'][current_snapmirror_label_index]:
+ modified = True
+ rule['keep'] = self.parameters['keep'][snapmirror_label_index]
+ else:
+ rule['keep'] = current['keep'][current_snapmirror_label_index]
+
+ # Check if prefix modified
+ if 'prefix' in self.parameters:
+ if self.parameters['prefix'][snapmirror_label_index] != current['prefix'][current_snapmirror_label_index]:
+ modified = True
+ rule['prefix'] = self.parameters['prefix'][snapmirror_label_index]
+ else:
+ rule['prefix'] = current['prefix'][current_snapmirror_label_index]
+ else:
+ rule['prefix'] = current['prefix'][current_snapmirror_label_index]
+
+ # Check if schedule modified
+ if 'schedule' in self.parameters:
+ if self.parameters['schedule'][snapmirror_label_index] != current['schedule'][current_snapmirror_label_index]:
+ modified = True
+ rule['schedule'] = self.parameters['schedule'][snapmirror_label_index]
+ else:
+ rule['schedule'] = current['schedule'][current_snapmirror_label_index]
+ else:
+ rule['schedule'] = current['schedule'][current_snapmirror_label_index]
+
+ if modified:
+ modified_rules.append(rule)
+ else:
+ unmodified_rules.append(rule)
+ return modified_rules, unmodified_rules
+
+ def identify_snapmirror_policy_rules_with_schedule(self, rules=None):
+ """
+ Identify rules that are using a schedule or not. At least one
+ non-schedule rule must be added to a policy before schedule rules
+ are added.
+ :return: List of rules with schedules and a list of rules without schedules
+ e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ],
+ [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': '', 'schedule': ''}, ... ]
+ """
+ schedule_rules = list()
+ non_schedule_rules = list()
+ if rules is not None:
+ for rule in rules:
+ if 'schedule' in rule:
+ schedule_rules.append(rule)
+ else:
+ non_schedule_rules.append(rule)
+ return schedule_rules, non_schedule_rules
+
+ def modify_snapmirror_policy_rules(self, current=None, uuid=None):
+ """
+ Modify existing rules in snapmirror policy
+ :return: None
+ """
+ self.validate_parameters()
+
+ # Need 'snapmirror_label' to add/modify/delete rules
+ if 'snapmirror_label' not in self.parameters:
+ return
+
+ obsolete_rules = self.identify_obsolete_snapmirror_policy_rules(current)
+ new_rules = self.identify_new_snapmirror_policy_rules(current)
+ modified_rules, unmodified_rules = self.identify_modified_snapmirror_policy_rules(current)
+
+ if self.use_rest:
+ api = "snapmirror/policies/" + uuid
+ data = {'retention': list()}
+
+ # As rule 'prefix' can't be unset, have to delete existing rules first.
+ # Builtin rules remain.
+ dummy, error = self.rest_api.patch(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+
+ # Re-add desired rules.
+ rules = unmodified_rules + modified_rules + new_rules
+ data['retention'] = self.create_snapmirror_policy_retention_obj_for_rest(rules)
+
+ if len(data['retention']) > 0:
+ dummy, error = self.rest_api.patch(api, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ delete_rules = obsolete_rules + modified_rules
+ add_schedule_rules, add_non_schedule_rules = self.identify_snapmirror_policy_rules_with_schedule(new_rules + modified_rules)
+ # Delete rules no longer required or modified rules that will be re-added.
+ for rule in delete_rules:
+ options = {'policy-name': self.parameters['policy_name'],
+ 'snapmirror-label': rule['snapmirror_label']}
+ self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-remove-rule')
+
+ # Add rules. At least one non-schedule rule must exist before
+ # a rule with a schedule can be added, otherwise zapi will complain.
+ for rule in add_non_schedule_rules + add_schedule_rules:
+ options = {'policy-name': self.parameters['policy_name'],
+ 'snapmirror-label': rule['snapmirror_label'],
+ 'keep': str(rule['keep'])}
+ if 'prefix' in rule and rule['prefix'] != '':
+ options['prefix'] = rule['prefix']
+ if 'schedule' in rule and rule['schedule'] != '':
+ options['schedule'] = rule['schedule']
+ self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-add-rule')
+
+ def modify_snapmirror_policy_rule(self, options, zapi):
+ """
+ Add, modify or remove a rule to/from a snapmirror policy
+ """
+ snapmirror_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(snapmirror_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapmirror policy rule %s: %s' %
+ (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self):
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_snapmirror_policy", cserver)
+
+ def apply(self):
+ uuid = None
+ if not self.use_rest:
+ self.asup_log_for_cserver()
+ current, modify = self.get_snapmirror_policy(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if current and cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if 'policy_type' in modify:
+ self.module.fail_json(msg='Error: policy type cannot be changed: current=%s, expected=%s' %
+ (current.get('policy_type'), modify['policy_type']))
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_snapmirror_policy()
+ if self.use_rest:
+ current = self.get_snapmirror_policy()
+ uuid = current['uuid']
+ self.modify_snapmirror_policy_rules(current, uuid)
+ else:
+ self.modify_snapmirror_policy_rules(current)
+ elif cd_action == 'delete':
+ if self.use_rest:
+ uuid = current['uuid']
+ self.delete_snapmirror_policy(uuid)
+ elif modify:
+ if self.use_rest:
+ uuid = current['uuid']
+ self.modify_snapmirror_policy(uuid, current['policy_type'])
+ self.modify_snapmirror_policy_rules(current, uuid)
+ else:
+ self.modify_snapmirror_policy()
+ self.modify_snapmirror_policy_rules(current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap SnapMirror policy object and runs the correct play task
+ """
+ obj = NetAppOntapSnapMirrorPolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py
new file mode 100644
index 00000000..fdc32b5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py
@@ -0,0 +1,333 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_snapshot
+short_description: NetApp ONTAP manage Snapshots
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete ONTAP snapshots
+options:
+ state:
+ description:
+ - Whether the specified snapshot should be created/modified, or deleted.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ snapshot:
+ description:
+ Name of the snapshot to be managed.
+ The maximum string length is 256 characters.
+ required: true
+ type: str
+ from_name:
+ description:
+ - Name of the existing snapshot to be renamed to the name given in snapshot.
+ version_added: 2.8.0
+ type: str
+ volume:
+ description:
+ - Name of the volume on which the snapshot is to be created.
+ required: true
+ type: str
+ async_bool:
+ description:
+ - If true, the snapshot is to be created asynchronously.
+ type: bool
+ comment:
+ description:
+ A human-readable comment attached to the snapshot.
+ The comment can be at most 255 characters long.
+ type: str
+ snapmirror_label:
+ description:
+ A human-readable SnapMirror label attached to the snapshot.
+ The label can be at most 31 characters long.
+ type: str
+ ignore_owners:
+ description:
+ - If this field is true, the snapshot will be deleted
+ even if other processes are accessing it.
+ type: bool
+ snapshot_instance_uuid:
+ description:
+ - The 128-bit unique snapshot identifier expressed in the form of a UUID.
+ type: str
+ vserver:
+ description:
+ - The Vserver name
+ required: true
+ type: str
+'''
+EXAMPLES = """
+ - name: create SnapShot
+ tags:
+ - create
+ na_ontap_snapshot:
+ state: present
+ snapshot: "{{ snapshot name }}"
+ volume: "{{ vol name }}"
+ comment: "i am a comment"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ - name: delete SnapShot
+ tags:
+ - delete
+ na_ontap_snapshot:
+ state: absent
+ snapshot: "{{ snapshot name }}"
+ volume: "{{ vol name }}"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ - name: modify SnapShot
+ tags:
+ - modify
+ na_ontap_snapshot:
+ state: present
+ snapshot: "{{ snapshot name }}"
+ comment: "New comments are great"
+ volume: "{{ vol name }}"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSnapshot(object):
+ """
+ Creates, modifies, and deletes a Snapshot
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ from_name=dict(required=False, type='str'),
+ snapshot=dict(required=True, type="str"),
+ volume=dict(required=True, type="str"),
+ async_bool=dict(required=False, type="bool", default=False),
+ comment=dict(required=False, type="str"),
+ snapmirror_label=dict(required=False, type="str"),
+ ignore_owners=dict(required=False, type="bool", default=False),
+ snapshot_instance_uuid=dict(required=False, type="str"),
+ vserver=dict(required=True, type="str"),
+
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def get_snapshot(self, snapshot_name=None):
+ """
+ Checks to see if a snapshot exists or not
+ :return: Dictionary of snapshot details (comment, snapmirror_label) if the snapshot exists, None otherwise
+ """
+ if snapshot_name is None:
+ snapshot_name = self.parameters['snapshot']
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
+ desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
+ snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
+ comment = netapp_utils.zapi.NaElement('comment')
+ snapmirror_label = netapp_utils.zapi.NaElement('snapmirror-label')
+ # add more desired attributes that are allowed to be modified
+ snapshot_info.add_child_elem(comment)
+ snapshot_info.add_child_elem(snapmirror_label)
+ desired_attr.add_child_elem(snapshot_info)
+ snapshot_obj.add_child_elem(desired_attr)
+ # compose query
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", snapshot_name)
+ snapshot_info_obj.add_new_child("volume", self.parameters['volume'])
+ snapshot_info_obj.add_new_child("vserver", self.parameters['vserver'])
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+ result = self.server.invoke_successfully(snapshot_obj, True)
+ return_value = None
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ snap_info = attributes_list.get_child_by_name('snapshot-info')
+ return_value = {'comment': snap_info.get_child_content('comment')}
+ if snap_info.get_child_by_name('snapmirror-label'):
+ return_value['snapmirror_label'] = snap_info.get_child_content('snapmirror-label')
+ else:
+ return_value['snapmirror_label'] = None
+ return return_value
+
+ def create_snapshot(self):
+ """
+ Creates a new snapshot
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-create")
+
+ # set up required variables to create a snapshot
+ snapshot_obj.add_new_child("snapshot", self.parameters['snapshot'])
+ snapshot_obj.add_new_child("volume", self.parameters['volume'])
+ # Set up optional variables to create a snapshot
+ if self.parameters.get('async_bool'):
+ snapshot_obj.add_new_child("async", str(self.parameters['async_bool']))
+ if self.parameters.get('comment'):
+ snapshot_obj.add_new_child("comment", self.parameters['comment'])
+ if self.parameters.get('snapmirror_label'):
+ snapshot_obj.add_new_child(
+ "snapmirror-label", self.parameters['snapmirror_label'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating snapshot %s: %s' %
+ (self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_snapshot(self):
+ """
+ Deletes an existing snapshot
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-delete")
+
+ # Set up required variables to delete a snapshot
+ snapshot_obj.add_new_child("snapshot", self.parameters['snapshot'])
+ snapshot_obj.add_new_child("volume", self.parameters['volume'])
+ # set up optional variables to delete a snapshot
+ if self.parameters.get('ignore_owners'):
+ snapshot_obj.add_new_child("ignore-owners", str(self.parameters['ignore_owners']))
+ if self.parameters.get('snapshot_instance_uuid'):
+ snapshot_obj.add_new_child("snapshot-instance-uuid", self.parameters['snapshot_instance_uuid'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting snapshot %s: %s' %
+ (self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_snapshot(self):
+ """
+ Modify an existing snapshot
+ :return:
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-modify-iter")
+ # Create query object, this is the existing object
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", self.parameters['snapshot'])
+ snapshot_info_obj.add_new_child("vserver", self.parameters['vserver'])
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+
+ # this is what we want to modify in the snapshot object
+ attributes = netapp_utils.zapi.NaElement("attributes")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", self.parameters['snapshot'])
+ if self.parameters.get('comment'):
+ snapshot_info_obj.add_new_child("comment", self.parameters['comment'])
+ if self.parameters.get('snapmirror_label'):
+ snapshot_info_obj.add_new_child("snapmirror-label", self.parameters['snapmirror_label'])
+ attributes.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(attributes)
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapshot %s: %s' %
+ (self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_snapshot(self):
+ """
+ Rename the snapshot
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-rename")
+
+ # set up required variables to rename a snapshot
+ snapshot_obj.add_new_child("current-name", self.parameters['from_name'])
+ snapshot_obj.add_new_child("new-name", self.parameters['snapshot'])
+ snapshot_obj.add_new_child("volume", self.parameters['volume'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming snapshot %s to %s: %s' %
+ (self.parameters['from_name'], self.parameters['snapshot'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check to see which play we should run
+ """
+ current = self.get_snapshot()
+ netapp_utils.ems_log_event("na_ontap_snapshot", self.server)
+ rename, cd_action = None, None
+ modify = {}
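+ # When from_name is supplied, treat this as a potential rename: look up the
+ # snapshot under its old name, and collect any attribute changes (comment,
+ # snapmirror_label) to apply alongside the rename.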
+ if self.parameters.get('from_name'):
+ current_old_name = self.get_snapshot(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(current_old_name, current)
+ modify = self.na_helper.get_modified_attributes(current_old_name, self.parameters)
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_snapshot()
+ if cd_action == 'create':
+ self.create_snapshot()
+ elif cd_action == 'delete':
+ self.delete_snapshot()
+ elif modify:
+ self.modify_snapshot()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates, modifies, and deletes a Snapshot
+ """
+ obj = NetAppOntapSnapshot()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py
new file mode 100644
index 00000000..ac9cd674
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py
@@ -0,0 +1,500 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_snapshot_policy
+short_description: NetApp ONTAP manage Snapshot Policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete ONTAP snapshot policies
+options:
+ state:
+ description:
+ - Whether the specified snapshot policy should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ Name of the snapshot policy to be managed.
+ The maximum string length is 256 characters.
+ required: true
+ type: str
+ enabled:
+ description:
+ - Status of the snapshot policy indicating whether the policy will be enabled or disabled.
+ type: bool
+ comment:
+ description:
+ A human readable comment attached to the snapshot policy.
+ The comment can be at most 255 characters long.
+ type: str
+ count:
+ description:
+ Retention count for the snapshots created by the schedule.
+ type: list
+ elements: int
+ schedule:
+ description:
+ - Schedule to be added inside the policy.
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Snapshot name prefix for the schedule.
+ - Prefix name should be unique within the policy.
+ - A different prefix cannot be set for a schedule that has already been assigned to a snapshot policy.
+ - The prefix cannot be modified after the schedule has been added.
+ type: list
+ elements: str
+ required: false
+ version_added: '19.10.1'
+ snapmirror_label:
+ description:
+ - SnapMirror label assigned to each schedule inside the policy. Use an empty
+ string ('') for no label.
+ type: list
+ elements: str
+ required: false
+ version_added: 2.9.0
+ vserver:
+ description:
+ - The name of the vserver to use. In a multi-tenanted environment, assigning a
+ Snapshot Policy to a vserver will restrict its use to that vserver.
+ required: false
+ type: str
+ version_added: 2.9.0
+'''
+EXAMPLES = """
+ - name: Create Snapshot policy
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: hourly
+ prefix: hourly
+ count: 150
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Create Snapshot policy with multiple schedules
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ count: [1, 2, 3, 4, 5]
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Create Snapshot policy owned by a vserver
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible3
+ vserver: ansible
+ schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ count: [1, 2, 3, 4, 5]
+ snapmirror_label: ['hourly', 'daily', 'weekly', 'monthly', '']
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Modify Snapshot policy with multiple schedules
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: ['daily', 'weekly']
+ count: [20, 30]
+ snapmirror_label: ['daily', 'weekly']
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Delete Snapshot policy
+ na_ontap_snapshot_policy:
+ state: absent
+ name: ansible2
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSnapshotPolicy(object):
+ """
+ Creates, modifies, and deletes a Snapshot Policy
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type="str"),
+ enabled=dict(required=False, type="bool"),
+ # count is a list of integers
+ count=dict(required=False, type="list", elements="int"),
+ comment=dict(required=False, type="str"),
+ schedule=dict(required=False, type="list", elements="str"),
+ prefix=dict(required=False, type="list", elements="str"),
+ snapmirror_label=dict(required=False, type="list", elements="str"),
+ vserver=dict(required=False, type="str")
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['enabled', 'count', 'schedule']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ if 'vserver' in self.parameters:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def get_snapshot_policy(self):
+ """
+ Checks to see if a snapshot policy exists or not
+ :return: Return policy details if a snapshot policy exists, None if it doesn't
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-get-iter")
+ # compose query
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-policy-info")
+ snapshot_info_obj.add_new_child("policy", self.parameters['name'])
+ if 'vserver' in self.parameters:
+ snapshot_info_obj.add_new_child("vserver-name", self.parameters['vserver'])
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(snapshot_obj, True)
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ snapshot_policy = result.get_child_by_name('attributes-list').get_child_by_name('snapshot-policy-info')
+ current = {}
+ current['name'] = snapshot_policy.get_child_content('policy')
+ current['vserver'] = snapshot_policy.get_child_content('vserver-name')
+ current['enabled'] = False if snapshot_policy.get_child_content('enabled').lower() == 'false' else True
+ current['comment'] = snapshot_policy.get_child_content('comment') or ''
+ current['schedule'], current['count'], current['snapmirror_label'], current['prefix'] = [], [], [], []
+ if snapshot_policy.get_child_by_name('snapshot-policy-schedules'):
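+ # Collect schedule, count, snapmirror label and prefix as parallel lists in
+ # the order returned by ONTAP; '-' or missing values are normalised to ''.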
+ for schedule in snapshot_policy['snapshot-policy-schedules'].get_children():
+ current['schedule'].append(schedule.get_child_content('schedule'))
+ current['count'].append(int(schedule.get_child_content('count')))
+
+ snapmirror_label = schedule.get_child_content('snapmirror-label')
+ if snapmirror_label is None or snapmirror_label == '-':
+ snapmirror_label = ''
+ current['snapmirror_label'].append(snapmirror_label)
+
+ prefix = schedule.get_child_content('prefix')
+ if prefix is None or prefix == '-':
+ prefix = ''
+ current['prefix'].append(prefix)
+ return current
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ return None
+
+ def validate_parameters(self):
+ """
+ Validate if each schedule has a count associated
+ :return: None
+ """
+ if 'count' not in self.parameters or 'schedule' not in self.parameters or \
+ len(self.parameters['count']) > 5 or len(self.parameters['schedule']) > 5 or \
+ len(self.parameters['count']) < 1 or len(self.parameters['schedule']) < 1 or \
+ len(self.parameters['count']) != len(self.parameters['schedule']):
+ self.module.fail_json(msg="Error: A Snapshot policy must have at least 1 "
+ "schedule and can have up to a maximum of 5 schedules, with a count "
+ "representing the maximum number of Snapshot copies for each schedule")
+
+ if 'snapmirror_label' in self.parameters:
+ if len(self.parameters['snapmirror_label']) != len(self.parameters['schedule']):
+ self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an "
+ "accompanying SnapMirror Label")
+
+ if 'prefix' in self.parameters:
+ if len(self.parameters['prefix']) != len(self.parameters['schedule']):
+ self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an "
+ "accompanying prefix")
+
+ def modify_snapshot_policy(self, current):
+ """
+ Modifies an existing snapshot policy
+ """
+ # Set up required variables to modify snapshot policy
+ options = {'policy': self.parameters['name']}
+ modify = False
+
+ # Set up optional variables to modify snapshot policy
+ if 'enabled' in self.parameters and self.parameters['enabled'] != current['enabled']:
+ options['enabled'] = str(self.parameters['enabled'])
+ modify = True
+ if 'comment' in self.parameters and self.parameters['comment'] != current['comment']:
+ options['comment'] = self.parameters['comment']
+ modify = True
+
+ if modify:
+ snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-modify', **options)
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapshot policy %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_snapshot_policy_schedules(self, current):
+ """
+ Modify existing schedules in snapshot policy
+ :return: None
+ """
+ self.validate_parameters()
+
+ delete_schedules, modify_schedules, add_schedules = [], [], []
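+ # Diff the current schedules against the requested ones into three work lists:
+ # schedules to remove, schedules whose count or SnapMirror label changed, and
+ # brand new schedules to add.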
+
+ if 'snapmirror_label' in self.parameters:
+ snapmirror_labels = self.parameters['snapmirror_label']
+ else:
+ # User hasn't supplied any snapmirror labels.
+ snapmirror_labels = [None] * len(self.parameters['schedule'])
+
+ # Identify schedules for deletion
+ for schedule in current['schedule']:
+ schedule = schedule.strip()
+ if schedule not in [item.strip() for item in self.parameters['schedule']]:
+ options = {'policy': current['name'],
+ 'schedule': schedule}
+ delete_schedules.append(options)
+
+ # Identify schedules to be modified or added
+ for schedule, count, snapmirror_label in zip(self.parameters['schedule'], self.parameters['count'], snapmirror_labels):
+ schedule = schedule.strip()
+ if snapmirror_label is not None:
+ snapmirror_label = snapmirror_label.strip()
+
+ options = {'policy': current['name'],
+ 'schedule': schedule}
+
+ if schedule in current['schedule']:
+ # Schedule exists. Only modify if it has changed.
+ modify = False
+ schedule_index = current['schedule'].index(schedule)
+
+ if count != current['count'][schedule_index]:
+ options['new-count'] = str(count)
+ modify = True
+
+ if snapmirror_label is not None:
+ if snapmirror_label != current['snapmirror_label'][schedule_index]:
+ options['new-snapmirror-label'] = snapmirror_label
+ modify = True
+
+ if modify:
+ modify_schedules.append(options)
+ else:
+ # New schedule
+ options['count'] = str(count)
+ if snapmirror_label is not None and snapmirror_label != '':
+ options['snapmirror-label'] = snapmirror_label
+ add_schedules.append(options)
+
+ # Delete N-1 schedules no longer required. Must leave 1 schedule in policy
+ # at any one time. Delete last one afterwards.
+ while len(delete_schedules) > 1:
+ options = delete_schedules.pop()
+ self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule')
+
+ # Modify schedules.
+ while len(modify_schedules) > 0:
+ options = modify_schedules.pop()
+ self.modify_snapshot_policy_schedule(options, 'snapshot-policy-modify-schedule')
+
+ # Add N-1 new schedules. Add last one after last schedule has been deleted.
+ while len(add_schedules) > 1:
+ options = add_schedules.pop()
+ self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule')
+
+ # Delete last schedule no longer required.
+ while len(delete_schedules) > 0:
+ options = delete_schedules.pop()
+ self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule')
+
+ # Add last new schedule.
+ while len(add_schedules) > 0:
+ options = add_schedules.pop()
+ self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule')
+
+ def modify_snapshot_policy_schedule(self, options, zapi):
+ """
+ Add, modify or remove a schedule to/from a snapshot policy
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(snapshot_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapshot policy schedule %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_snapshot_policy(self):
+ """
+ Creates a new snapshot policy
+ """
+ # set up required variables to create a snapshot policy
+ self.validate_parameters()
+ options = {'policy': self.parameters['name'],
+ 'enabled': str(self.parameters['enabled']),
+ }
+
+ if 'snapmirror_label' in self.parameters:
+ snapmirror_labels = self.parameters['snapmirror_label']
+ else:
+ # User hasn't supplied any snapmirror labels.
+ snapmirror_labels = [None] * len(self.parameters['schedule'])
+
+ if 'prefix' in self.parameters:
+ prefixes = self.parameters['prefix']
+ else:
+ # User hasn't supplied any prefixes.
+ prefixes = [None] * len(self.parameters['schedule'])
+
+ # zapi attribute for first schedule is schedule1, second is schedule2 and so on
+ positions = [str(i) for i in range(1, len(self.parameters['schedule']) + 1)]
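+ # For example, schedule=['hourly', 'daily'] with count=[6, 2] becomes
+ # {'schedule1': 'hourly', 'count1': '6', 'schedule2': 'daily', 'count2': '2'}
+ # (values shown are hypothetical).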
+ for schedule, prefix, count, snapmirror_label, position in \
+ zip(self.parameters['schedule'], prefixes,
+ self.parameters['count'], snapmirror_labels, positions):
+ schedule = schedule.strip()
+ options['count' + position] = str(count)
+ options['schedule' + position] = schedule
+ if snapmirror_label is not None:
+ snapmirror_label = snapmirror_label.strip()
+ if snapmirror_label != '':
+ options['snapmirror-label' + position] = snapmirror_label
+ if prefix is not None:
+ prefix = prefix.strip()
+ if prefix != '':
+ options['prefix' + position] = prefix
+
+ snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-create', **options)
+
+ # Set up optional variables to create a snapshot policy
+ if self.parameters.get('comment'):
+ snapshot_obj.add_new_child("comment", self.parameters['comment'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating snapshot policy %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_snapshot_policy(self):
+ """
+ Deletes an existing snapshot policy
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-delete")
+
+ # Set up required variables to delete a snapshot policy
+ snapshot_obj.add_new_child("policy", self.parameters['name'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting snapshot policy %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ if 'vserver' in self.parameters:
+ netapp_utils.ems_log_event(event_name, self.server)
+ else:
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ """
+ Check to see which play we should run
+ """
+ self.asup_log_for_cserver("na_ontap_snapshot_policy")
+ current = self.get_snapshot_policy()
+ modify = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ # Don't sort schedule/prefix/count/snapmirror_label lists as it can
+ # mess up the intended parameter order.
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_snapshot_policy()
+ elif cd_action == 'delete':
+ self.delete_snapshot_policy()
+ if modify:
+ self.modify_snapshot_policy(current)
+ self.modify_snapshot_policy_schedules(current)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates, modifies, and deletes a Snapshot Policy
+ """
+ obj = NetAppOntapSnapshotPolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py
new file mode 100644
index 00000000..ae49a721
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+"""
+create SNMP module to add/delete SNMP community
+"""
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create/Delete SNMP community"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_snmp
+options:
+ access_control:
+ description:
+ - "Access control for the community. The only supported value is 'ro' (read-only)"
+ required: true
+ type: str
+ community_name:
+ description:
+ - "The name of the SNMP community to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified SNMP community should exist or not."
+ default: 'present'
+ type: str
+short_description: NetApp ONTAP SNMP community
+version_added: 2.6.0
+'''
+
+EXAMPLES = """
+ - name: Create SNMP community
+ na_ontap_snmp:
+ state: present
+ community_name: communityName
+ access_control: 'ro'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete SNMP community
+ na_ontap_snmp:
+ state: absent
+ community_name: communityName
+ access_control: 'ro'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPSnmp(object):
+ '''Class with SNMP methods, doesn't support check mode'''
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ community_name=dict(required=True, type='str'),
+ access_control=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ parameters = self.module.params
+ # set up state variables
+ self.state = parameters['state']
+ self.community_name = parameters['community_name']
+ self.access_control = parameters['access_control']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def invoke_snmp_community(self, zapi):
+ """
+ Invoke zapi - add/delete take the same NaElement structure
+ @return: SUCCESS / FAILURE with an error_message
+ """
+ snmp_community = netapp_utils.zapi.NaElement.create_node_with_children(
+ zapi, **{'community': self.community_name,
+ 'access-control': self.access_control})
+ try:
+ self.server.invoke_successfully(snmp_community, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError: # return False for duplicate entry
+ return False
+ return True
+
+ def add_snmp_community(self):
+ """
+ Adds a SNMP community
+ """
+ return self.invoke_snmp_community('snmp-community-add')
+
+ def delete_snmp_community(self):
+ """
+ Delete a SNMP community
+ """
+ return self.invoke_snmp_community('snmp-community-delete')
+
+ def apply(self):
+ """
+ Apply action to SNMP community
+ This module is not idempotent:
+ Add doesn't fail the playbook if the user is trying
+ to add an already existing SNMP community
+ """
+ changed = False
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_snmp", cserver)
+ if self.state == 'present': # add
+ if self.add_snmp_community():
+ changed = True
+ elif self.state == 'absent': # delete
+ if self.delete_snmp_community():
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ '''Execute action'''
+ community_obj = NetAppONTAPSnmp()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py
new file mode 100644
index 00000000..916bd5a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+"""
+create SNMP traphosts module to add/delete SNMP traphosts
+"""
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_snmp_traphosts
+short_description: NetApp ONTAP SNMP traphosts.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Whether the specified SNMP traphost should exist or not. Requires REST with ONTAP 9.7 or higher.
+options:
+ ip_address:
+ description:
+ - "The IP address of the SNMP traphost to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified SNMP traphost should exist or not."
+ default: 'present'
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create SNMP traphost
+ na_ontap_snmp_traphosts:
+ state: present
+ ip_address: '10.10.10.10'
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ - name: Delete SNMP traphost
+ na_ontap_snmp_traphosts:
+ state: absent
+ ip_address: '10.10.10.10'
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPSnmpTraphosts(object):
+ """Class with SNMP methods"""
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ ip_address=dict(required=True, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ if not self.rest_api.is_rest():
+ self.module.fail_json(msg="na_ontap_snmp_traphosts only support Rest and ONTAP 9.6+")
+
+ def get_snmp_traphosts(self):
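+ """
+ Look up the traphost matching ip_address via the REST API
+ :return: list of matching records, or None if the traphost does not exist
+ """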
+ params = {'ip_address': self.parameters['ip_address']}
+ api = 'support/snmp/traphosts'
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ if not message['records']:
+ return None
+ return message['records']
+
+ def create_snmp_traphost(self):
+ api = '/support/snmp/traphosts'
+ params = {'host': self.parameters['ip_address']}
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def delete_snmp_traphost(self):
+ api = '/support/snmp/traphosts/' + self.parameters['ip_address']
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ self.module.fail_json(msg="Error deleting traphost: %s" % error)
+
+ def apply(self):
+ """
+ Apply action to SNMP traphost
+ """
+ current = self.get_snmp_traphosts()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_snmp_traphost()
+ elif cd_action == 'delete':
+ self.delete_snmp_traphost()
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPSnmpTraphosts()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py
new file mode 100644
index 00000000..27d3ec48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_software_update
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Update ONTAP software
+ - Requires an https connection and is not supported over http
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_software_update
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified ONTAP package should be installed or not.
+ default: present
+ type: str
+ nodes:
+ description:
+ - List of nodes to be updated; the nodes have to be part of an HA pair.
+ aliases:
+ - node
+ type: list
+ elements: str
+ package_version:
+ required: true
+ description:
+ - Specifies the package version to update software.
+ type: str
+ package_url:
+ required: true
+ type: str
+ description:
+ - Specifies the package URL to download the package.
+ ignore_validation_warning:
+ description:
+ - Allows the update to continue if warnings are encountered during the validation phase.
+ default: False
+ type: bool
+ download_only:
+ description:
+ - Allows downloading the image without performing the update.
+ default: False
+ type: bool
+ version_added: 20.4.0
+ stabilize_minutes:
+ description:
+ - Number of minutes that the update should wait after a takeover or giveback is completed.
+ type: int
+ version_added: 20.6.0
+ timeout:
+ description:
+ - how long to wait for the update to complete, in seconds.
+ default: 1800
+ type: int
+ force_update:
+ description:
+ - force an update, even if package_version matches what is reported as installed.
+ default: false
+ type: bool
+ version_added: 20.11.0
+short_description: NetApp ONTAP Update Software
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: ONTAP software update
+ na_ontap_software_update:
+ state: present
+ nodes: vsim1
+ package_url: "{{ url }}"
+ package_version: "{{ version_name }}"
+ ignore_validation_warning: True
+ download_only: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPSoftwareUpdate(object):
+ """
+ Class with ONTAP software update methods
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ nodes=dict(required=False, type='list', elements='str', aliases=["node"]),
+ package_version=dict(required=True, type='str'),
+ package_url=dict(required=True, type='str'),
+ ignore_validation_warning=dict(required=False, type='bool', default=False),
+ download_only=dict(required=False, type='bool', default=False),
+ stabilize_minutes=dict(required=False, type='int'),
+ timeout=dict(required=False, type='int', default=1800),
+ force_update=dict(required=False, type='bool', default=False),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ @staticmethod
+ def cluster_image_get_iter():
+ """
+ Compose NaElement object to query current version
+ :return: NaElement object for cluster-image-get-iter with query
+ """
+ cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ cluster_image_info = netapp_utils.zapi.NaElement('cluster-image-info')
+ query.add_child_elem(cluster_image_info)
+ cluster_image_get.add_child_elem(query)
+ return cluster_image_get
+
+ def cluster_image_get(self):
+ """
+ Get current cluster image info
+ :return: list of (node id, current version) tuples; empty list if no records are found
+ """
+ cluster_image_get_iter = self.cluster_image_get_iter()
+ try:
+ result = self.server.invoke_successfully(cluster_image_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching cluster image details: %s: %s'
+ % (self.parameters['package_version'], to_native(error)),
+ exception=traceback.format_exc())
+ # return cluster image details
+ node_versions = list()
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ for image_info in result.get_child_by_name('attributes-list').get_children():
+ node_versions.append((image_info.get_child_content('node-id'), image_info.get_child_content('current-version')))
+ return node_versions
+
+ def cluster_image_get_for_node(self, node_name):
+ """
+ Get current cluster image info for given node
+ """
+ cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get')
+ cluster_image_get.add_new_child('node-id', node_name)
+ try:
+ result = self.server.invoke_successfully(cluster_image_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching cluster image details for %s: %s'
+ % (node_name, to_native(error)),
+ exception=traceback.format_exc())
+ # return cluster image version
+ if result.get_child_by_name('attributes').get_child_by_name('cluster-image-info'):
+ image_info = result.get_child_by_name('attributes').get_child_by_name('cluster-image-info')
+ if image_info:
+ return image_info.get_child_content('node-id'), image_info.get_child_content('current-version')
+ return None, None
+
+ @staticmethod
+ def get_localname(tag):
+ return netapp_utils.zapi.etree.QName(tag).localname
+
+ def cluster_image_update_progress_get(self, ignore_connection_error=True):
+ """
+ Get current cluster image update progress info
+ :return: Dictionary of cluster image update progress if query successful, else return None
+ """
+ cluster_update_progress_get = netapp_utils.zapi.NaElement('cluster-image-update-progress-info')
+ cluster_update_progress_info = dict()
+ try:
+ result = self.server.invoke_successfully(cluster_update_progress_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ # return empty dict on error to satisfy package delete upon image update
+ if ignore_connection_error:
+ return cluster_update_progress_info
+ self.module.fail_json(msg='Error fetching cluster image update progress details: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+ # return cluster image update progress details
+ if result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info'):
+ update_progress_info = result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info')
+ cluster_update_progress_info['overall_status'] = update_progress_info.get_child_content('overall-status')
+ cluster_update_progress_info['completed_node_count'] = update_progress_info.\
+ get_child_content('completed-node-count')
+ reports = update_progress_info.get_child_by_name('validation-reports')
+ if reports:
+ cluster_update_progress_info['validation_reports'] = list()
+ for report in reports.get_children():
+ checks = dict()
+ for check in report.get_children():
+ checks[self.get_localname(check.get_name())] = check.get_content()
+ cluster_update_progress_info['validation_reports'].append(checks)
+ return cluster_update_progress_info
+
+ def cluster_image_update(self):
+ """
+ Update current cluster image
+ """
+ cluster_update_info = netapp_utils.zapi.NaElement('cluster-image-update')
+ cluster_update_info.add_new_child('package-version', self.parameters['package_version'])
+ cluster_update_info.add_new_child('ignore-validation-warning',
+ str(self.parameters['ignore_validation_warning']))
+ if self.parameters.get('stabilize_minutes'):
+ cluster_update_info.add_new_child('stabilize-minutes',
+ self.na_helper.get_value_for_int(False, self.parameters['stabilize_minutes']))
+ if self.parameters.get('nodes'):
+ cluster_nodes = netapp_utils.zapi.NaElement('nodes')
+ for node in self.parameters['nodes']:
+ cluster_nodes.add_new_child('node-name', node)
+ cluster_update_info.add_child_elem(cluster_nodes)
+ try:
+ self.server.invoke_successfully(cluster_update_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ msg = 'Error updating cluster image for %s: %s' % (self.parameters['package_version'], to_native(error))
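+ # On failure, try to surface the validation reports from the in-progress
+ # update; if none are available, fall back to running cluster-image-validate.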
+ cluster_update_progress_info = self.cluster_image_update_progress_get(ignore_connection_error=True)
+ validation_reports = str(cluster_update_progress_info.get('validation_reports'))
+ if validation_reports == "None":
+ validation_reports = str(self.cluster_image_validate())
+ self.module.fail_json(msg=msg, validation_reports=validation_reports, exception=traceback.format_exc())
+
+ def cluster_image_package_download(self):
+ """
+ Download the cluster image package
+ :return: True if the package already exists, else return False
+ """
+ cluster_image_package_download_info = netapp_utils.zapi.NaElement('cluster-image-package-download')
+ cluster_image_package_download_info.add_new_child('package-url', self.parameters['package_url'])
+ try:
+ self.server.invoke_successfully(cluster_image_package_download_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 18408 denotes Package image with the same name already exists
+ if to_native(error.code) == "18408":
+ # TODO: if another package is using the same image name, we're stuck
+ return True
+ else:
+ self.module.fail_json(msg='Error downloading cluster image package for %s: %s'
+ % (self.parameters['package_url'], to_native(error)),
+ exception=traceback.format_exc())
+ return False
+
+ def cluster_image_package_delete(self):
+ """
+ Delete current cluster image package
+ """
+ cluster_image_package_delete_info = netapp_utils.zapi.NaElement('cluster-image-package-delete')
+ cluster_image_package_delete_info.add_new_child('package-version', self.parameters['package_version'])
+ try:
+ self.server.invoke_successfully(cluster_image_package_delete_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting cluster image package for %s: %s'
+ % (self.parameters['package_version'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def cluster_image_package_download_progress(self):
+ """
+ Get current cluster image package download progress
+ :return: Dictionary of cluster image download progress if query successful, else return None
+ """
+ cluster_image_package_download_progress_info = netapp_utils.zapi.\
+ NaElement('cluster-image-get-download-progress')
+ try:
+ result = self.server.invoke_successfully(
+ cluster_image_package_download_progress_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching cluster image package download progress for %s: %s'
+ % (self.parameters['package_url'], to_native(error)),
+ exception=traceback.format_exc())
+ # return cluster image download progress details
+ cluster_download_progress_info = dict()
+ if result.get_child_by_name('progress-status'):
+ cluster_download_progress_info['progress_status'] = result.get_child_content('progress-status')
+ cluster_download_progress_info['progress_details'] = result.get_child_content('progress-details')
+ cluster_download_progress_info['failure_reason'] = result.get_child_content('failure-reason')
+ return cluster_download_progress_info
+ return None
+
+ def cluster_image_validate(self):
+ """
+ Validate that NDU is feasible.
+ :return: list of validation report dictionaries, or an error message string on failure
+ """
+ cluster_image_validation_info = netapp_utils.zapi.NaElement('cluster-image-validate')
+ cluster_image_validation_info.add_new_child('package-version', self.parameters['package_version'])
+ try:
+ result = self.server.invoke_successfully(
+ cluster_image_validation_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ msg = 'Error running cluster image validate: %s' % to_native(error)
+ return msg
+ # return cluster validation report
+ cluster_report_info = list()
+ if result.get_child_by_name('cluster-image-validation-report-list'):
+ for report in result.get_child_by_name('cluster-image-validation-report-list').get_children():
+ cluster_report_info.append(dict(
+ ndu_check=report.get_child_content('ndu-check'),
+ ndu_status=report.get_child_content('ndu-status'),
+ required_action=report.get_child_content('required-action')
+ ))
+ return cluster_report_info
+
+ def autosupport_log(self):
+ """
+ Autosupport log for software_update
+ :return:
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_software_update", cserver)
+
+ def is_update_required(self):
+ ''' return True if at least one node is not at the correct version '''
+ if self.parameters.get('nodes'):
+ versions = [self.cluster_image_get_for_node(node) for node in self.parameters['nodes']]
+ else:
+ versions = self.cluster_image_get()
+ current_versions = set([x[1] for x in versions])
+ if len(current_versions) != 1:
+ # mixed set, need to update
+ return True
+ # only update if versions differ
+ return current_versions.pop() != self.parameters['package_version']
+
+ def apply(self):
+ """
+ Apply action to update ONTAP software
+ """
+ # TODO: cluster image update only works for HA configurations.
+ # check if node image update can be used for other cases.
+ if self.parameters.get('https') is not True:
+ self.module.fail_json(msg='https parameter must be True')
+ self.autosupport_log()
+ changed = self.parameters['force_update'] or self.is_update_required()
+ validation_reports = 'only available after update'
+ if not self.module.check_mode and changed:
+ if self.parameters.get('state') == 'present':
+ package_exists = self.cluster_image_package_download()
+ if package_exists is False:
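+ # Poll the download every 5 seconds until it leaves the running phase;
+ # anything other than async_pkg_get_phase_complete is treated as a failure.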
+ cluster_download_progress = self.cluster_image_package_download_progress()
+ while cluster_download_progress.get('progress_status') == 'async_pkg_get_phase_running':
+ time.sleep(5)
+ cluster_download_progress = self.cluster_image_package_download_progress()
+ if not cluster_download_progress.get('progress_status') == 'async_pkg_get_phase_complete':
+ self.module.fail_json(msg='Error downloading package: %s'
+ % (cluster_download_progress['failure_reason']))
+ if self.parameters['download_only'] is False:
+ self.cluster_image_update()
+ # delete package once update is completed
+ cluster_update_progress = dict()
+ time_left = self.parameters['timeout']
+ polling_interval = 25
+ # assume in_progress if dict is empty
+ while time_left > 0 and cluster_update_progress.get('overall_status', 'in_progress') == 'in_progress':
+ time.sleep(polling_interval)
+ time_left -= polling_interval
+ cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=True)
+ if cluster_update_progress.get('overall_status') == 'completed':
+ validation_reports = str(cluster_update_progress.get('validation_reports'))
+ self.cluster_image_package_delete()
+ else:
+ cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=False)
+ if cluster_update_progress.get('overall_status') != 'completed':
+ if cluster_update_progress.get('overall_status') == 'in_progress':
+ msg = 'Timeout error'
+ action = ' Should the timeout value be increased? Current value is %d seconds.' % self.parameters['timeout']
+ action += ' The software update continues in background.'
+ else:
+ msg = 'Error'
+ action = ''
+ msg += ' updating image: overall_status: %s.' % (cluster_update_progress.get('overall_status', 'cannot get status'))
+ msg += action
+ validation_reports = str(cluster_update_progress.get('validation_reports'))
+ self.module.fail_json(msg=msg, validation_reports=validation_reports)
+
+ self.module.exit_json(changed=changed, validation_reports=validation_reports)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPSoftwareUpdate()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py
new file mode 100644
index 00000000..d45817f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Run cli commands on ONTAP over SSH using paramiko.
+ - Output is returned in C(stdout) and C(stderr), and also as C(stdout_lines), C(stdout_lines_filtered), C(stderr_lines).
+ - Note that the module can succeed even though the command failed. You need to analyze stdout and check the results.
+ - If the SSH host key is unknown and accepted, C(warnings) is updated.
+ - Options related to ZAPI or REST APIs are ignored.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_ssh_command
+short_description: NetApp ONTAP Run any cli command over plain SSH using paramiko.
+version_added: 20.8.0
+options:
+ command:
+ description:
+ - a string containing the command and arguments.
+ required: true
+ type: str
+ privilege:
+ description:
+ - privilege level at which to run the command, e.g. admin, advanced.
+ - if set, the command is prefixed with C(set -privilege <privilege>;).
+ type: str
+ accept_unknown_host_keys:
+ description:
+ - When false, reject the connection if the host key is not in known_hosts file.
+ - When true, if the host key is unknown, accept it, but report a warning.
+ - Note that the key is not added to the file. You could add the key manually by using SSH.
+ type: bool
+ default: false
+ include_lines:
+ description:
+ - return only lines containing string pattern in C(stdout_lines_filtered)
+ default: ''
+ type: str
+ exclude_lines:
+ description:
+ - exclude lines containing the string pattern from C(stdout_lines_filtered)
+ default: ''
+ type: str
+ service_processor:
+ description:
+ - whether the target system is ONTAP or the service processor (SP)
+ - only meaningful when privilege is set
+ aliases: [sp]
+ default: false
+ type: bool
+'''
+
+EXAMPLES = """
+ - name: run ontap cli command using SSH
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: version
+
+ # Same as above, with parameters
+ - name: run ontap cli command
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: node show -fields node,health,uptime,model
+ privilege: admin
+
+ # Same as above, but with lines filtering
+ - name: run ontap cli command
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: node show -fields node,health,uptime,model
+ exclude_lines: 'ode ' # Exclude lines with 'Node ' or 'node'
+ # use with caution!
+ accept_unknown_host_keys: true
+ privilege: admin
+
+ - name: run ontap SSH command on SP
+ na_ontap_ssh_command:
+ # <<: *sp_login
+ command: sp switch-version
+ privilege: diag
+ sp: true
+ register: result
+ - debug: var=result
+"""
+
+RETURN = """
+stdout_lines_filtered:
+ description:
+ - In addition to stdout and stdout_lines, a list of non-blank lines, excluding last login and failed login information.
+ - The list can be further refined using the include_lines and exclude_lines filters.
+ returned: always
+ type: list
+"""
+
+import traceback
+import warnings
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+
+class NetAppONTAPSSHCommand(object):
+ ''' calls a CLI command using SSH'''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ command=dict(required=True, type='str'),
+ privilege=dict(required=False, type='str'),
+ accept_unknown_host_keys=dict(required=False, type='bool', default=False),
+ include_lines=dict(required=False, type='str', default=''),
+ exclude_lines=dict(required=False, type='str', default=''),
+ service_processor=dict(required=False, type='bool', default=False, aliases=['sp']),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.command = parameters['command']
+ self.privilege = parameters['privilege']
+ self.include_lines = parameters['include_lines']
+ self.exclude_lines = parameters['exclude_lines']
+ self.accept_unknown_host_keys = parameters['accept_unknown_host_keys']
+ self.service_processor = parameters['service_processor']
+ self.warnings = list()
+ self.failed = False
+
+ if not HAS_PARAMIKO:
+ self.module.fail_json(msg="the python paramiko module is required")
+
+ client = paramiko.SSHClient()
+ client.load_system_host_keys() # load ~/.ssh/known_hosts if it exists
+ if self.accept_unknown_host_keys:
+ # accept unknown key, but raise a python warning
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
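+ # Capture Python warnings raised by paramiko during connect (for example an
+ # accepted unknown host key) and report them through the module's warnings.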
+ with warnings.catch_warnings(record=True) as wngs:
+ try:
+ client.connect(hostname=parameters['hostname'], username=parameters['username'], password=parameters['password'])
+ if len(wngs) > 0:
+ self.warnings.extend([str(warning.message) for warning in wngs])
+ except paramiko.SSHException as exc:
+ self.module.fail_json(msg="SSH connection failed: %s" % repr(exc))
+
+ self.client = client
+
+ def parse_output(self, out):
+ out_string = out.read()
+ # ONTAP makes copious use of \r
+ out_string = out_string.replace(b'\r\r\n', b'\n')
+ out_string = out_string.replace(b'\r\n', b'\n')
+ return out_string
+
+ def run_ssh_command(self, command):
+ ''' calls SSH '''
+ try:
+ stdin, stdout, stderr = self.client.exec_command(command)
+ except paramiko.SSHException as exc:
+ self.module.fail_json(msg='Error running command %s: %s' %
+ (command, to_native(exc)),
+ exception=traceback.format_exc())
+ stdin.close() # if we don't close, we may see a TypeError
+ return stdout, stderr
+
+ def filter_output(self, output):
+ ''' Generate stdout_lines_filtered list
+            Remove login information if found in the first non-blank lines
+ '''
+ result = list()
+ find_banner = True
+ for line in output.splitlines():
+ try:
+ stripped_line = line.strip().decode()
+ except Exception as exc:
+ self.warnings.append("Unable to decode ONTAP output. Skipping filtering. Error: %s" % repr(exc))
+ result.append('ERROR: truncated, cannot decode: %s' % line)
+ self.failed = False
+ return result
+
+ if not stripped_line:
+ continue
+ if find_banner and stripped_line.startswith(('Last login time:', 'Unsuccessful login attempts since last login:')):
+ continue
+ find_banner = False
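+            # Filtering precedence: when exclude_lines is set, a line is kept only if it
+            # does not contain exclude_lines and it contains include_lines (which defaults
+            # to '' and therefore matches every line).  When only include_lines is set,
+            # a line must contain it.  With neither filter, every non-blank line is kept.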
+ if self.exclude_lines:
+ if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
+ result.append(stripped_line)
+ elif self.include_lines:
+ if self.include_lines in stripped_line:
+ result.append(stripped_line)
+ else:
+ result.append(stripped_line)
+
+ return result
+
+ def run_command(self):
+ ''' calls SSH '''
+ # self.ems()
+ command = self.command
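+        # The service processor CLI expects 'priv set', while the ONTAP clustershell
+        # expects 'set -privilege'; prefix the command accordingly.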
+ if self.privilege is not None:
+ if self.service_processor:
+ command = "priv set %s;%s" % (self.privilege, command)
+ else:
+ command = "set -privilege %s;%s" % (self.privilege, command)
+ stdout, stderr = self.run_ssh_command(command)
+ stdout_string = self.parse_output(stdout)
+ stdout_filtered = self.filter_output(stdout_string)
+ return stdout_string, stdout_filtered, self.parse_output(stderr)
+
+ def apply(self):
+ ''' calls the command and returns raw output '''
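+        # The module cannot tell whether the CLI command has side effects, so it
+        # always reports changed=true; in check mode the command is not executed.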
+ changed = True
+ stdout, filtered, stderr = '', '', ''
+ if not self.module.check_mode:
+ stdout, filtered, stderr = self.run_command()
+ if stderr:
+ self.failed = True
+ self.module.exit_json(changed=changed, failed=self.failed, stdout=stdout, stdout_lines_filtered=filtered, stderr=stderr, warnings=self.warnings)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPSSHCommand()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py
new file mode 100644
index 00000000..c53dee2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+
+# (c) 2018-2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_svm
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_svm
+
+short_description: NetApp ONTAP SVM
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, modify or delete SVM on NetApp ONTAP
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the SVM to be renamed
+ type: str
+ version_added: 2.7.0
+
+ root_volume:
+ description:
+ - Root volume of the SVM.
+ - Cannot be modified after creation.
+ type: str
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Cannot be modified after creation.
+ type: str
+
+ root_volume_security_style:
+ description:
+ - Security Style of the root volume.
+ - When specified as part of the vserver-create,
+ this field represents the security style for the Vserver root volume.
+ - When specified as part of vserver-get-iter call,
+ this will return the list of matching Vservers.
+ - The 'unified' security style, which applies only to Infinite Volumes,
+ cannot be applied to a Vserver's root volume.
+ - Cannot be modified after creation.
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+ type: str
+
+ allowed_protocols:
+ description:
+ - Allowed Protocols.
+ - When specified as part of a vserver-create,
+      this field represents the list of protocols allowed on the Vserver.
+ - When part of vserver-get-iter call,
+ this will return the list of Vservers
+ which have any of the protocols specified
+ as part of the allowed-protocols.
+ - When part of vserver-modify,
+ this field should include the existing list
+ along with new protocol list to be added to prevent data disruptions.
+ - Possible values
+ - nfs NFS protocol,
+ - cifs CIFS protocol,
+ - fcp FCP protocol,
+ - iscsi iSCSI protocol,
+ - ndmp NDMP protocol,
+ - http HTTP protocol,
+ - nvme NVMe protocol
+ type: list
+ elements: str
+
+ aggr_list:
+ description:
+ - List of aggregates assigned for volume operations.
+ - These aggregates could be shared for use with other Vservers.
+ - When specified as part of a vserver-create,
+ this field represents the list of aggregates
+ that are assigned to the Vserver for volume operations.
+ - When part of vserver-get-iter call,
+ this will return the list of Vservers
+ which have any of the aggregates specified as part of the aggr list.
+ type: list
+ elements: str
+
+ ipspace:
+ description:
+ - IPSpace name
+ - Cannot be modified after creation.
+ type: str
+ version_added: 2.7.0
+
+
+ snapshot_policy:
+ description:
+ - Default snapshot policy setting for all volumes of the Vserver.
+ This policy will be assigned to all volumes created in this
+ Vserver unless the volume create request explicitly provides a
+ snapshot policy or volume is modified later with a specific
+ snapshot policy. A volume-level snapshot policy always overrides
+ the default Vserver-wide snapshot policy.
+ version_added: 2.7.0
+ type: str
+
+ language:
+ description:
+ - Language to use for the SVM
+ - Default to C.UTF-8
+ - Possible values Language
+ - c POSIX
+ - ar Arabic
+ - cs Czech
+ - da Danish
+ - de German
+ - en English
+ - en_us English (US)
+ - es Spanish
+ - fi Finnish
+ - fr French
+ - he Hebrew
+ - hr Croatian
+ - hu Hungarian
+ - it Italian
+ - ja Japanese euc-j
+ - ja_v1 Japanese euc-j
+ - ja_jp.pck Japanese PCK (sjis)
+ - ja_jp.932 Japanese cp932
+ - ja_jp.pck_v2 Japanese PCK (sjis)
+ - ko Korean
+ - no Norwegian
+ - nl Dutch
+ - pl Polish
+ - pt Portuguese
+ - ro Romanian
+ - ru Russian
+ - sk Slovak
+ - sl Slovenian
+ - sv Swedish
+ - tr Turkish
+ - zh Simplified Chinese
+ - zh.gbk Simplified Chinese (GBK)
+ - zh_tw Traditional Chinese euc-tw
+ - zh_tw.big5 Traditional Chinese Big 5
+ - utf8mb4
+ - Most of the values accept a .utf_8 suffix, e.g. fr.utf_8
+ type: str
+ version_added: 2.7.0
+
+ subtype:
+ description:
+ - The subtype for vserver to be created.
+ - Cannot be modified after creation.
+ choices: ['default', 'dp_destination', 'sync_source', 'sync_destination']
+ type: str
+ version_added: 2.7.0
+
+ comment:
+ description:
+ - When specified as part of a vserver-create, this field represents the comment associated with the Vserver.
+ - When part of vserver-get-iter call, this will return the list of matching Vservers.
+ type: str
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ na_ontap_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
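+    # Illustrative sketches based on the documented from_name and state options;
+    # names and credentials are placeholders.
+    - name: Rename SVM
+      na_ontap_svm:
+        state: present
+        name: ansibleVServerNew
+        from_name: ansibleVServer
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+
+    - name: Delete SVM
+      na_ontap_svm:
+        state: absent
+        name: ansibleVServerNew
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+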
+"""
+
+RETURN = """
+"""
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.zapis_svm as zapis
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapSVM(object):
+ ''' create, delete, modify, rename SVM (aka vserver) '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ root_volume=dict(type='str'),
+ root_volume_aggregate=dict(type='str'),
+ root_volume_security_style=dict(type='str', choices=['unix',
+ 'ntfs',
+ 'mixed',
+ 'unified'
+ ]),
+ allowed_protocols=dict(type='list', elements='str'),
+ aggr_list=dict(type='list', elements='str'),
+ ipspace=dict(type='str', required=False),
+ snapshot_policy=dict(type='str', required=False),
+ language=dict(type='str', required=False),
+ subtype=dict(type='str', choices=['default', 'dp_destination', 'sync_source', 'sync_destination']),
+ comment=dict(type="str", required=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Ontap documentation uses C.UTF-8, but actually stores as c.utf_8.
+ if 'language' in self.parameters and self.parameters['language'].lower() == 'c.utf-8':
+ self.parameters['language'] = 'c.utf_8'
+
+ self.rest_api = OntapRestAPI(self.module)
+ # with REST, to force synchronous operations
+ self.timeout = self.rest_api.timeout
+ # root volume not supported with rest api
+ unsupported_rest_properties = ['root_volume', 'root_volume_aggregate', 'root_volume_security_style']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ @staticmethod
+ def clean_up_output(vserver_details):
+ vserver_details['root_volume'] = None
+ vserver_details['root_volume_aggregate'] = None
+ vserver_details['root_volume_security_style'] = None
+ vserver_details['aggr_list'] = []
+ for aggr in vserver_details['aggregates']:
+ vserver_details['aggr_list'].append(aggr['name'])
+ vserver_details.pop('aggregates')
+ vserver_details['ipspace'] = vserver_details['ipspace']['name']
+ vserver_details['snapshot_policy'] = vserver_details['snapshot_policy']['name']
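+        # REST reports each protocol as a nested dict with an 'enabled' flag; flatten
+        # those flags into the allowed_protocols list used for comparisons.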
+ vserver_details['allowed_protocols'] = []
+ if 'cifs' in vserver_details:
+ if vserver_details['cifs']['enabled']:
+ vserver_details['allowed_protocols'].append('cifs')
+ vserver_details.pop('cifs')
+ if 'fcp' in vserver_details:
+ if vserver_details['fcp']['enabled']:
+ vserver_details['allowed_protocols'].append('fcp')
+ vserver_details.pop('fcp')
+        if 'iscsi' in vserver_details:
+ if vserver_details['iscsi']['enabled']:
+ vserver_details['allowed_protocols'].append('iscsi')
+ vserver_details.pop('iscsi')
+ if 'nvme' in vserver_details:
+ if vserver_details['nvme']['enabled']:
+ vserver_details['allowed_protocols'].append('nvme')
+ vserver_details.pop('nvme')
+ if 'nfs' in vserver_details:
+ if vserver_details['nfs']['enabled']:
+ vserver_details['allowed_protocols'].append('nfs')
+ vserver_details.pop('nfs')
+ return vserver_details
+
+ def get_vserver(self, vserver_name=None):
+ """
+ Checks if vserver exists.
+
+ :return:
+ vserver object if vserver found
+ None if vserver is not found
+ :rtype: object/None
+ """
+ if vserver_name is None:
+ vserver_name = self.parameters['name']
+
+ if self.use_rest:
+ api = 'svm/svms'
+ params = {'fields': 'subtype,aggregates,language,snapshot_policy,ipspace,comment,nfs,cifs,fcp,iscsi,nvme'}
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ return None
+ elif 'records' in message and len(message['records']) == 0:
+ return None
+ elif 'records' not in message:
+                error = "Unexpected response in get_vserver from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ vserver_details = None
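+            # the GET above returns all SVMs; select the record of interest by name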
+ for record in message['records']:
+ if record['name'] == vserver_name:
+ vserver_details = copy.deepcopy(record)
+ break
+ if vserver_details is None:
+ return None
+ return self.clean_up_output(vserver_details)
+
+ else:
+ return zapis.get_vserver(self.server, vserver_name)
+
+ def create_vserver(self):
+ if self.use_rest:
+ api = 'svm/svms'
+ params = {'name': self.parameters['name']}
+ if self.parameters.get('language'):
+ params['language'] = self.parameters['language']
+ if self.parameters.get('ipspace'):
+ params['ipspace'] = self.parameters['ipspace']
+ if self.parameters.get('snapshot_policy'):
+ params['snapshot_policy'] = self.parameters['snapshot_policy']
+ if self.parameters.get('subtype'):
+ params['subtype'] = self.parameters['subtype']
+ if self.parameters.get('comment'):
+ params['comment'] = self.parameters['comment']
+ if self.parameters.get('aggr_list'):
+ params['aggregates'] = []
+ for aggr in self.parameters['aggr_list']:
+ params['aggregates'].append({'name': aggr})
+ if self.parameters.get('allowed_protocols'):
+ for protocol in self.parameters['allowed_protocols']:
+ params[protocol] = {'enabled': 'true'}
+ # for a sync operation
+ data = {'return_timeout': self.timeout}
+ __, error = self.rest_api.post(api, params, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ options = {'vserver-name': self.parameters['name']}
+ self.add_parameter_to_dict(options, 'root_volume', 'root-volume')
+ self.add_parameter_to_dict(options, 'root_volume_aggregate', 'root-volume-aggregate')
+ self.add_parameter_to_dict(options, 'root_volume_security_style', 'root-volume-security-style')
+ self.add_parameter_to_dict(options, 'language', 'language')
+ self.add_parameter_to_dict(options, 'ipspace', 'ipspace')
+ self.add_parameter_to_dict(options, 'snapshot_policy', 'snapshot-policy')
+ self.add_parameter_to_dict(options, 'subtype', 'vserver-subtype')
+ self.add_parameter_to_dict(options, 'comment', 'comment')
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children('vserver-create', **options)
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error provisioning SVM %s: %s'
+ % (self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+ # add allowed-protocols, aggr-list after creation,
+ # since vserver-create doesn't allow these attributes during creation
+ options = dict()
+ for key in ('allowed_protocols', 'aggr_list'):
+ if self.parameters.get(key):
+ options[key] = self.parameters[key]
+ if options:
+ self.modify_vserver(options)
+
+ def delete_vserver(self, current=None):
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in delete')
+ api = 'svm/svms/%s' % current['uuid']
+ # for a sync operation
+ query = {'return_timeout': self.timeout}
+ __, error = self.rest_api.delete(api, params=query)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error deleting SVM %s: %s'
+ % (self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self, current=None):
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in rename')
+ api = 'svm/svms/%s' % current['uuid']
+ params = {'name': self.parameters['name']}
+ # for a sync operation
+ data = {'return_timeout': self.timeout}
+ __, error = self.rest_api.patch(api, params, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.parameters['from_name'],
+ 'new-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error renaming SVM %s: %s'
+ % (self.parameters['from_name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def modify_vserver(self, modify, current=None):
+ '''
+ Modify vserver.
+ :param modify: list of modify attributes
+ :param current: with rest, SVM object to modify
+ '''
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in modify')
+ api = 'svm/svms/%s' % current['uuid']
+ for attribute in modify:
+                if attribute in ('snapshot_policy', 'allowed_protocols', 'aggr_list'):
+ self.module.fail_json(msg='REST API does not support modify of %s' % attribute)
+ # for a sync operation
+ data = {'return_timeout': self.timeout}
+ __, error = self.rest_api.patch(api, modify, data)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ zapis.modify_vserver(self.server, self.module, self.parameters['name'], modify, self.parameters)
+
+ def add_parameter_to_dict(self, adict, name, key=None, tostr=False):
+ '''
+ add defined parameter (not None) to adict using key.
+ :param adict: a dictionary.
+ :param name: name in self.parameters.
+ :param key: key in adict.
+ :param tostr: boolean.
+ '''
+ if key is None:
+ key = name
+ if self.parameters.get(name) is not None:
+ if tostr:
+ adict[key] = str(self.parameters.get(name))
+ else:
+ adict[key] = self.parameters.get(name)
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ if not self.use_rest:
+ self.asup_log_for_cserver("na_ontap_svm")
+ current = self.get_vserver()
+ cd_action, rename = None, None
+ if self.parameters.get('from_name'):
+ old_svm = self.get_vserver(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(old_svm, current)
+ if rename is None:
+ self.module.fail_json(msg='Error renaming SVM %s: no SVM with from_name %s.' % (self.parameters['name'], self.parameters['from_name']))
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ for attribute in modify:
+ if attribute in ['root_volume', 'root_volume_aggregate', 'root_volume_security_style', 'subtype', 'ipspace']:
+ self.module.fail_json(msg='Error modifying SVM %s: can not modify %s.' % (self.parameters['name'], attribute))
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ self.rename_vserver(old_svm)
+ # If rename is True, cd_action is None, but modify could be true or false.
+ if cd_action == 'create':
+ self.create_vserver()
+ elif cd_action == 'delete':
+ self.delete_vserver(current)
+ elif modify:
+ self.modify_vserver(modify, current)
+
+ results = dict(changed=self.na_helper.changed)
+ if modify:
+ if netapp_utils.has_feature(self.module, 'show_modified'):
+ results['modify'] = str(modify)
+ if 'aggr_list' in modify:
+ if '*' in modify['aggr_list']:
+                    results['warnings'] = "Changed is always 'True' when aggr_list is '*'."
+ self.module.exit_json(**results)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ '''Apply vserver operations from playbook'''
+ svm = NetAppOntapSVM()
+ svm.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py
new file mode 100644
index 00000000..3480a827
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP Modify SVM Options
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+    - Modify ONTAP SVM options.
+    - Only options that appear in "vserver options show" can be set.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_svm_options
+version_added: 2.7.0
+options:
+ name:
+ description:
+ - Name of the option.
+ type: str
+ value:
+ description:
+    - Value of the option.
+    - Value must be quoted.
+ type: str
+ vserver:
+ description:
+    - The name of the vserver to which this option belongs.
+ required: True
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Set SVM Options
+ na_ontap_svm_options:
+ vserver: "{{ netapp_vserver_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ name: snmp.enable
+ value: 'on'
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPSvnOptions(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=False, type="str", default=None),
+ value=dict(required=False, type='str', default=None),
+ vserver=dict(required=True, type='str')
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def set_options(self):
+ """
+ Set a specific option
+ :return: None
+ """
+ option_obj = netapp_utils.zapi.NaElement("options-set")
+ option_obj.add_new_child('name', self.parameters['name'])
+ option_obj.add_new_child('value', self.parameters['value'])
+ try:
+ self.server.invoke_successfully(option_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc())
+
+ def list_options(self):
+ """
+ List all Options on the Vserver
+ :return: None
+ """
+ option_obj = netapp_utils.zapi.NaElement("options-list-info")
+ try:
+ self.server.invoke_successfully(option_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc())
+
+ def is_option_set(self):
+ """
+ Checks to see if an option is set or not
+ :return: If option is set return True, else return False
+ """
+ option_obj = netapp_utils.zapi.NaElement("options-get-iter")
+ options_info = netapp_utils.zapi.NaElement("option-info")
+ if self.parameters.get('name') is not None:
+ options_info.add_new_child("name", self.parameters['name'])
+ if self.parameters.get('value') is not None:
+ options_info.add_new_child("value", self.parameters['value'])
+ if "vserver" in self.parameters.keys():
+ if self.parameters['vserver'] is not None:
+ options_info.add_new_child("vserver", self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement("query")
+ query.add_child_elem(options_info)
+ option_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(option_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ return False
+
+ def apply(self):
+ changed = False
+ netapp_utils.ems_log_event("na_ontap_svm_options", self.server)
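+        # Idempotency: is_option_set queries options-get-iter with both name and value,
+        # so the option is only set (and a change reported) when the requested value
+        # is not already in effect.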
+ is_set = self.is_option_set()
+ if not is_set:
+ if self.module.check_mode:
+ pass
+ else:
+ self.set_options()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ :return: none
+ """
+ cg_obj = NetAppONTAPSvnOptions()
+ cg_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
new file mode 100644
index 00000000..5c5f847b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+---
+
+module: na_ontap_ucadapter
+short_description: NetApp ONTAP UC adapter configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+    - Modify the UC adapter mode and type, taking pending type and mode into account.
+
+options:
+ state:
+ description:
+ - Whether the specified adapter should exist.
+ required: false
+ choices: ['present']
+ default: 'present'
+ type: str
+
+ adapter_name:
+ description:
+ - Specifies the adapter name.
+ required: true
+ type: str
+
+ node_name:
+ description:
+ - Specifies the adapter home node.
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Specifies the mode of the adapter.
+ type: str
+
+ type:
+ description:
+ - Specifies the fc4 type of the adapter.
+ type: str
+
+ pair_adapters:
+ description:
+    - Specifies the list of adapters which also need to be taken offline along with the current adapter when modifying.
+    - If the specified adapter works in a group or pair, the other adapters might also need to be taken offline before the specified adapter can be modified.
+    - The mode of the pair_adapters is modified along with the adapter; the type of the pair_adapters is not modified.
+ type: list
+ elements: str
+ version_added: '20.6.0'
+
+'''
+
+EXAMPLES = '''
+ - name: Modify adapter
+      na_ontap_ucadapter:
+ state: present
+ adapter_name: 0e
+ pair_adapters: 0f
+ node_name: laurentn-vsim1
+ mode: fc
+ type: target
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapadapter(object):
+ ''' object to describe adapter info '''
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present'], default='present', type='str'),
+ adapter_name=dict(required=True, type='str'),
+ node_name=dict(required=True, type='str'),
+ mode=dict(required=False, type='str'),
+ type=dict(required=False, type='str'),
+ pair_adapters=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_adapter(self):
+ """
+ Return details about the adapter
+ :param:
+            name : Name of the adapter
+
+ :return: Details about the adapter. None if not found.
+ :rtype: dict
+ """
+ adapter_info = netapp_utils.zapi.NaElement('ucm-adapter-get')
+ adapter_info.add_new_child('adapter-name', self.parameters['adapter_name'])
+ adapter_info.add_new_child('node-name', self.parameters['node_name'])
+ try:
+ result = self.server.invoke_successfully(adapter_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching ucadapter details: %s: %s'
+ % (self.parameters['node_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('attributes'):
+ adapter_attributes = result.get_child_by_name('attributes').\
+ get_child_by_name('uc-adapter-info')
+ return_value = {
+ 'mode': adapter_attributes.get_child_content('mode'),
+ 'pending-mode': adapter_attributes.get_child_content('pending-mode'),
+ 'type': adapter_attributes.get_child_content('fc4-type'),
+ 'pending-type': adapter_attributes.get_child_content('pending-fc4-type'),
+ 'status': adapter_attributes.get_child_content('status'),
+ }
+ return return_value
+ return None
+
+ def modify_adapter(self):
+ """
+ Modify the adapter.
+ """
+ params = {'adapter-name': self.parameters['adapter_name'],
+ 'node-name': self.parameters['node_name']}
+ if self.parameters.get('type') is not None:
+ params['fc4-type'] = self.parameters['type']
+ if self.parameters.get('mode') is not None:
+ params['mode'] = self.parameters['mode']
+ adapter_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'ucm-adapter-modify', ** params)
+ try:
+ self.server.invoke_successfully(adapter_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error modifying adapter %s: %s' % (self.parameters['adapter_name'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def online_or_offline_adapter(self, status, adapter_name):
+ """
+ Bring a Fibre Channel target adapter offline/online.
+ """
+ if status == 'down':
+ adapter = netapp_utils.zapi.NaElement('fcp-adapter-config-down')
+ elif status == 'up':
+ adapter = netapp_utils.zapi.NaElement('fcp-adapter-config-up')
+ adapter.add_new_child('fcp-adapter', adapter_name)
+ adapter.add_new_child('node', self.parameters['node_name'])
+ try:
+ self.server.invoke_successfully(adapter,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error trying to %s fc-adapter %s: %s' % (status, adapter_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ """
+        Autosupport log for ucadapter
+ :return:
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_ucadapter", cserver)
+
+ def apply(self):
+ ''' calling all adapter features '''
+ changed = False
+ adapter_detail = self.get_adapter()
+
+ def need_to_change(expected, pending, current):
+ if expected is None:
+ return False
+ elif pending is not None:
+ return pending != expected
+ elif current is not None:
+ return current != expected
+ return False
+
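+        # A change is needed only when the requested value differs from the pending
+        # value (if any), or from the current value when nothing is pending.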
+ if adapter_detail:
+ if self.parameters.get('type') is not None:
+ changed = need_to_change(self.parameters['type'], adapter_detail['pending-type'], adapter_detail['type'])
+ changed = changed or need_to_change(self.parameters.get('mode'), adapter_detail['pending-mode'], adapter_detail['mode'])
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.online_or_offline_adapter('down', self.parameters['adapter_name'])
+ if self.parameters.get('pair_adapters') is not None:
+ for adapter in self.parameters['pair_adapters']:
+ self.online_or_offline_adapter('down', adapter)
+ self.modify_adapter()
+ self.online_or_offline_adapter('up', self.parameters['adapter_name'])
+ if self.parameters.get('pair_adapters') is not None:
+ for adapter in self.parameters['pair_adapters']:
+ self.online_or_offline_adapter('up', adapter)
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ adapter = NetAppOntapadapter()
+ adapter.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
new file mode 100644
index 00000000..dd589074
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+"""
+na_ontap_unix_group: create, delete or modify UNIX groups
+"""
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create/Delete Unix user group"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_unix_group
+options:
+ state:
+ description:
+ - Whether the specified group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Specifies UNIX group's name, unique for each group.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ id:
+ description:
+ - Specifies an identification number for the UNIX group.
+ - Group ID is unique for each UNIX group.
+ - Required for create, modifiable.
+ type: int
+
+ vserver:
+ description:
+ - Specifies the Vserver for the UNIX group.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ skip_name_validation:
+ description:
+ - Specifies if group name validation is skipped.
+ type: bool
+
+ users:
+ description:
+ - Specifies the users associated with this group. Should be comma separated.
+ - It represents the expected state of a list of users at any time.
+ - Add a user into group if it is specified in expected state but not in current state.
+ - Delete a user from group if it is specified in current state but not in expected state.
+ - To delete all current users, use '' as value.
+ type: list
+ elements: str
+ version_added: 2.9.0
+
+short_description: NetApp ONTAP UNIX Group
+version_added: 2.8.0
+
+"""
+
+EXAMPLES = """
+ - name: Create UNIX group
+ na_ontap_unix_group:
+ state: present
+ name: SampleGroup
+ vserver: ansibleVServer
+ id: 2
+ users: user1,user2
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete all users in UNIX group
+ na_ontap_unix_group:
+ state: present
+ name: SampleGroup
+ vserver: ansibleVServer
+ users: ''
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete UNIX group
+ na_ontap_unix_group:
+ state: absent
+ name: SampleGroup
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUnixGroup(object):
+ """
+ Common operations to manage UNIX groups
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ id=dict(required=False, type='int'),
+ skip_name_validation=dict(required=False, type='bool'),
+ vserver=dict(required=True, type='str'),
+ users=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+ self.na_helper.zapi_string_keys = {
+ 'name': 'group-name'
+ }
+ self.na_helper.zapi_int_keys = {
+ 'id': 'group-id'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'skip_name_validation': 'skip-name-validation'
+ }
+
+ def get_unix_group(self):
+ """
+ Checks if the UNIX group exists.
+
+ :return:
+ dict() if group found
+ None if group is not found
+ """
+
+ get_unix_group = netapp_utils.zapi.NaElement('name-mapping-unix-group-get-iter')
+ attributes = {
+ 'query': {
+ 'unix-group-info': {
+ 'group-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ }
+ }
+ }
+ get_unix_group.translate_struct(attributes)
+ try:
+ result = self.server.invoke_successfully(get_unix_group, enable_tunneling=True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ group_info = result['attributes-list']['unix-group-info']
+ group_details = dict()
+ else:
+ return None
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+ group_details[item_key] = group_info[zapi_key]
+ for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+ group_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=group_info[zapi_key])
+ if group_info.get_child_by_name('users') is not None:
+ group_details['users'] = [user.get_child_content('user-name')
+ for user in group_info.get_child_by_name('users').get_children()]
+ else:
+ group_details['users'] = None
+ return group_details
+
+ def create_unix_group(self):
+ """
+        Creates a UNIX group in the specified Vserver
+
+ :return: None
+ """
+ if self.parameters.get('id') is None:
+ self.module.fail_json(msg='Error: Missing a required parameter for create: (id)')
+
+ group_create = netapp_utils.zapi.NaElement('name-mapping-unix-group-create')
+ group_details = {}
+ for item in self.parameters:
+ if item in self.na_helper.zapi_string_keys:
+ zapi_key = self.na_helper.zapi_string_keys.get(item)
+ group_details[zapi_key] = self.parameters[item]
+ elif item in self.na_helper.zapi_bool_keys:
+ zapi_key = self.na_helper.zapi_bool_keys.get(item)
+ group_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+ value=self.parameters[item])
+ elif item in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(item)
+ group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=self.parameters[item])
+ group_create.translate_struct(group_details)
+ try:
+ self.server.invoke_successfully(group_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if self.parameters.get('users') is not None:
+ self.modify_users_in_group()
+
+ def delete_unix_group(self):
+ """
+        Deletes a UNIX group from a Vserver
+
+ :return: None
+ """
+ group_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-group-destroy', **{'group-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(group_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_unix_group(self, params):
+ """
+        Modify a UNIX group on a Vserver
+ :param params: modify parameters
+ :return: None
+ """
+ # modify users requires separate zapi.
+ if 'users' in params:
+ self.modify_users_in_group()
+ if len(params) == 1:
+ return
+
+ group_modify = netapp_utils.zapi.NaElement('name-mapping-unix-group-modify')
+ group_details = {'group-name': self.parameters['name']}
+ for key in params:
+ if key in self.na_helper.zapi_int_keys:
+ zapi_key = self.na_helper.zapi_int_keys.get(key)
+ group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
+ value=params[key])
+ group_modify.translate_struct(group_details)
+
+ try:
+ self.server.invoke_successfully(group_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_users_in_group(self):
+ """
+ Add/delete one or many users in a UNIX group
+
+ :return: None
+ """
+ current_users = self.get_unix_group().get('users')
+ expect_users = self.parameters.get('users')
+
+ if current_users is None:
+ current_users = []
+ if expect_users[0] == '' and len(expect_users) == 1:
+ expect_users = []
+
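+        # Reconcile to the desired state: users present only in the current state are
+        # removed, users present only in the expected state are added.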
+ users_to_remove = list(set(current_users) - set(expect_users))
+ users_to_add = list(set(expect_users) - set(current_users))
+
+ if len(users_to_add) > 0:
+ for user in users_to_add:
+ add_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-add-user')
+ group_details = {'group-name': self.parameters['name'], 'user-name': user}
+ add_user.translate_struct(group_details)
+ try:
+ self.server.invoke_successfully(add_user, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error adding user %s to UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ if len(users_to_remove) > 0:
+ for user in users_to_remove:
+ delete_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-delete-user')
+ group_details = {'group-name': self.parameters['name'], 'user-name': user}
+ delete_user.translate_struct(group_details)
+ try:
+ self.server.invoke_successfully(delete_user, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error deleting user %s from UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ """
+ Autosupport log for unix_group
+ :return: None
+ """
+ netapp_utils.ems_log_event("na_ontap_unix_group", self.server)
+
+ def apply(self):
+ """
+ Invoke appropriate action based on playbook parameters
+
+ :return: None
+ """
+ self.autosupport_log()
+ current = self.get_unix_group()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters['state'] == 'present' and cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_unix_group()
+ elif cd_action == 'delete':
+ self.delete_unix_group()
+ else:
+ self.modify_unix_group(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapUnixGroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py
new file mode 100644
index 00000000..40f05425
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_unix_user
+
+short_description: NetApp ONTAP UNIX users
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete or modify UNIX users local to ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Specifies user's UNIX account name.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ group_id:
+ description:
+ - Specifies the primary group identification number for the UNIX user
+ - Required for create, modifiable.
+ type: int
+
+ vserver:
+ description:
+ - Specifies the Vserver for the UNIX user.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ id:
+ description:
+ - Specifies an identification number for the UNIX user.
+ - Required for create, modifiable.
+ type: int
+
+ full_name:
+ description:
+ - Specifies the full name of the UNIX user
+ - Optional for create, modifiable.
+ type: str
+'''
+
+EXAMPLES = """
+
+ - name: Create UNIX User
+ na_ontap_unix_user:
+ state: present
+ name: SampleUser
+ vserver: ansibleVServer
+ group_id: 1
+ id: 2
+ full_name: Test User
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete UNIX User
+ na_ontap_unix_user:
+ state: absent
+ name: SampleUser
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
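+    # Illustrative sketch only - group_id, id and full_name are documented as
+    # modifiable; the values below are placeholders.
+    - name: Modify UNIX User
+      na_ontap_unix_user:
+        state: present
+        name: SampleUser
+        vserver: ansibleVServer
+        group_id: 1
+        id: 2
+        full_name: Updated Test User
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+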
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUnixUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ group_id=dict(required=False, type='int'),
+ id=dict(required=False, type='int'),
+ full_name=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_unix_user(self):
+ """
+ Checks if the UNIX user exists.
+
+ :return:
+ dict() if user found
+ None if user is not found
+ """
+
+ get_unix_user = netapp_utils.zapi.NaElement('name-mapping-unix-user-get-iter')
+ attributes = {
+ 'query': {
+ 'unix-user-info': {
+ 'user-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ }
+ }
+ }
+ get_unix_user.translate_struct(attributes)
+ try:
+ result = self.server.invoke_successfully(get_unix_user, enable_tunneling=True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ user_info = result['attributes-list']['unix-user-info']
+ return {'group_id': int(user_info['group-id']),
+ 'id': int(user_info['user-id']),
+ 'full_name': user_info['full-name']}
+ return None
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_unix_user(self):
+ """
+        Creates a UNIX user in the specified Vserver
+
+ :return: None
+ """
+ if self.parameters.get('group_id') is None or self.parameters.get('id') is None:
+ self.module.fail_json(msg='Error: Missing one or more required parameters for create: (group_id, id)')
+
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-user-create', **{'user-name': self.parameters['name'],
+ 'group-id': str(self.parameters['group_id']),
+ 'user-id': str(self.parameters['id'])})
+ if self.parameters.get('full_name') is not None:
+ user_create.add_new_child('full-name', self.parameters['full_name'])
+
+ try:
+ self.server.invoke_successfully(user_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_unix_user(self):
+ """
+        Deletes a UNIX user from a Vserver
+
+ :return: None
+ """
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-user-destroy', **{'user-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(user_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_unix_user(self, params):
+ user_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-user-modify', **{'user-name': self.parameters['name']})
+ for key in params:
+ if key == 'group_id':
+ user_modify.add_new_child('group-id', str(params['group_id']))
+ if key == 'id':
+ user_modify.add_new_child('user-id', str(params['id']))
+ if key == 'full_name':
+ user_modify.add_new_child('full-name', params['full_name'])
+
+ try:
+ self.server.invoke_successfully(user_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def autosupport_log(self):
+ """
+ Autosupport log for unix_user
+ :return: None
+ """
+ netapp_utils.ems_log_event("na_ontap_unix_user", self.server)
+
+ def apply(self):
+ """
+ Invoke appropriate action based on playbook parameters
+
+ :return: None
+ """
+ self.autosupport_log()
+ current = self.get_unix_user()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters['state'] == 'present' and cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_unix_user()
+ elif cd_action == 'delete':
+ self.delete_unix_user()
+ else:
+ self.modify_unix_user(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapUnixUser()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py
new file mode 100644
index 00000000..26690f0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py
@@ -0,0 +1,712 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_user
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_user
+
+short_description: NetApp ONTAP user configuration and management
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy users.
+
+options:
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+ type: str
+ applications:
+ description:
+    - List of applications to grant access to.
+ - Creating a login with application console, telnet, rsh, and service-processor for a data Vserver is not supported.
+ - Module supports both service-processor and service_processor choices.
+ - ZAPI requires service-processor, while REST requires service_processor, except for an issue with ONTAP 9.6 and 9.7.
+ - snmp is not supported in REST.
+ required: true
+ type: list
+ elements: str
+ choices: ['console', 'http','ontapi','rsh','snmp','service_processor','service-processor','sp','ssh','telnet']
+ aliases:
+ - application
+ authentication_method:
+ description:
+ - Authentication method for the application.
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+ - Password for console application
+ - Password, domain, nsswitch, cert for http application.
+ - Password, domain, nsswitch, cert for ontapi application.
+ - Community for snmp application (when creating SNMPv1 and SNMPv2 users).
+ - The usm and community for snmp application (when creating SNMPv3 users).
+ - Password for sp application.
+ - Password for rsh application.
+ - Password for telnet application.
+ - Password, publickey, domain, nsswitch for ssh application.
+ required: true
+ type: str
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert']
+ set_password:
+ description:
+ - Password for the user account.
+ - It is ignored for creating snmp users, but is required for creating non-snmp users.
+ - For an existing user, this value will be used as the new password.
+ type: str
+ role_name:
+ description:
+ - The name of the role. Required when C(state=present)
+ type: str
+ lock_user:
+ description:
+ - Whether the specified user account is locked.
+ type: bool
+ vserver:
+ description:
+ - The name of the vserver to use.
+ aliases:
+ - svm
+ required: true
+ type: str
+ authentication_protocol:
+ description:
+ - Authentication protocol for the snmp user.
+ - When cluster FIPS mode is on, 'sha' and 'sha2-256' are the only possible and valid values.
+ - When cluster FIPS mode is off, the default value is 'none'.
+ - When cluster FIPS mode is on, the default value is 'sha'.
+ - Only available for 'usm' authentication method and non modifiable.
+ choices: ['none', 'md5', 'sha', 'sha2-256']
+ type: str
+ version_added: '20.6.0'
+ authentication_password:
+ description:
+ - Password for the authentication protocol. This should be minimum 8 characters long.
+ - This is required for 'md5', 'sha' and 'sha2-256' authentication protocols and not required for 'none'.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ engine_id:
+ description:
+ - Authoritative entity's EngineID for the SNMPv3 user.
+ - This should be specified as a hexadecimal string.
+    - Engine ID with the first bit set to 1 in the first octet should have a minimum of 5 and a maximum of 32 octets.
+    - Engine ID with the first bit set to 0 in the first octet should be 12 octets in length.
+    - Engine ID cannot have all zeros in its address.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ privacy_protocol:
+ description:
+ - Privacy protocol for the snmp user.
+ - When cluster FIPS mode is on, 'aes128' is the only possible and valid value.
+ - When cluster FIPS mode is off, the default value is 'none'. When cluster FIPS mode is on, the default value is 'aes128'.
+ - Only available for 'usm' authentication method and not modifiable.
+ choices: ['none', 'des', 'aes128']
+ type: str
+ version_added: '20.6.0'
+ privacy_password:
+ description:
+ - Password for the privacy protocol. This should be at least 8 characters long.
+ - This is required for 'des' and 'aes128' privacy protocols and not required for 'none'.
+ - Only available for 'usm' authentication method and not modifiable.
+ type: str
+ version_added: '20.6.0'
+ remote_switch_ipaddress:
+ description:
+ - This optionally specifies the IP address of the remote switch.
+ - The remote switch could be a cluster switch monitored by Cluster Switch Health Monitor (CSHM)
+ or a Fibre Channel (FC) switch monitored by MetroCluster Health Monitor (MCC-HM).
+ - This is applicable only for a remote SNMPv3 user, i.e. only if the user is a remote (non-local) user,
+ the application is snmp and the authentication method is usm.
+ type: str
+ version_added: '20.6.0'
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ na_ontap_user:
+ state: present
+ name: SampleUser
+ applications: ssh,console
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ lock_user: True
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete User
+ na_ontap_user:
+ state: absent
+ name: SampleUser
+ applications: ssh
+ authentication_method: password
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create user with snmp application (ZAPI)
+ na_ontap_user:
+ state: present
+ name: test_cert_snmp
+ applications: snmp
+ authentication_method: usm
+ role_name: admin
+ authentication_protocol: md5
+ authentication_password: '12345678'
+ privacy_protocol: 'aes128'
+ privacy_password: '12345678'
+ engine_id: '7063514941000000000000'
+ remote_switch_ipaddress: 10.0.0.0
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
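+ # Illustrative sketch: for an existing user, set_password is applied as a new
+ # password and lock_user controls the account lock state. The user, role and
+ # vserver names are placeholders.
+ - name: Update password and unlock existing user (sketch)
+ na_ontap_user:
+ state: present
+ name: SampleUser
+ applications: ssh
+ authentication_method: password
+ set_password: "{{ new_password }}"
+ lock_user: False
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+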
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+
+ applications=dict(required=True, type='list', elements='str', aliases=['application'],
+ choices=['console', 'http', 'ontapi', 'rsh', 'snmp',
+ 'sp', 'service-processor', 'service_processor', 'ssh', 'telnet'],),
+ authentication_method=dict(required=True, type='str',
+ choices=['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert']),
+ set_password=dict(required=False, type='str', no_log=True),
+ role_name=dict(required=False, type='str'),
+ lock_user=dict(required=False, type='bool'),
+ vserver=dict(required=True, type='str', aliases=['svm']),
+ authentication_protocol=dict(required=False, type='str', choices=['none', 'md5', 'sha', 'sha2-256']),
+ authentication_password=dict(required=False, type='str', no_log=True),
+ engine_id=dict(required=False, type='str'),
+ privacy_protocol=dict(required=False, type='str', choices=['none', 'des', 'aes128']),
+ privacy_password=dict(required=False, type='str', no_log=True),
+ remote_switch_ipaddress=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['role_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # REST API should be used for ONTAP 9.6 or higher
+ self.rest_api = OntapRestAPI(self.module)
+ # some attributes are not supported in earlier REST implementation
+ unsupported_rest_properties = ['authentication_password', 'authentication_protocol', 'engine_id',
+ 'privacy_password', 'privacy_protocol']
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ self.use_rest, error = self.rest_api.is_rest(used_unsupported_rest_properties)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ if not self.use_rest:
+ if not HAS_NETAPP_LIB:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ else:
+ if 'snmp' in self.parameters['applications']:
+ self.module.fail_json(msg="Snmp as application is not supported in REST.")
+
+ def get_user_rest(self):
+ api = 'security/accounts'
+ params = {
+ 'name': self.parameters['name']
+ }
+ if self.parameters.get('vserver') is None:
+ # vserver is empty for cluster
+ params['scope'] = 'cluster'
+ else:
+ params['owner.name'] = self.parameters['vserver']
+
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg='Error while fetching user info: %s' % error)
+ if message['num_records'] == 1:
+ return message['records'][0]['owner']['uuid'], message['records'][0]['name']
+ if message['num_records'] > 1:
+ self.module.fail_json(msg='Error while fetching user info, found multiple entries: %s' % repr(message))
+
+ return None
+
+ def get_user_details_rest(self, name, uuid):
+ params = {
+ 'fields': 'role,applications,locked'
+ }
+ api = "security/accounts/%s/%s" % (uuid, name)
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg='Error while fetching user details: %s' % error)
+ if message:
+ return_value = {
+ 'role_name': message['role']['name'],
+ 'applications': [app['application'] for app in message['applications']]
+ }
+ if "locked" in message:
+ return_value['lock_user'] = message['locked']
+ return return_value
+
+ def get_user(self, application=None):
+ """
+ Checks if the user exists.
+ :param: application: application to grant access to
+ :return:
+ Dictionary if user found
+ None if user is not found
+ """
+ security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-account-info', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name'],
+ 'authentication-method': self.parameters['authentication_method']})
+ if application is not None:
+ query_details.add_new_child('application', application)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(security_login_get_iter,
+ enable_tunneling=False)
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+ interface_attributes = result.get_child_by_name('attributes-list').\
+ get_child_by_name('security-login-account-info')
+ return_value = {
+ 'lock_user': interface_attributes.get_child_content('is-locked'),
+ 'role_name': interface_attributes.get_child_content('role-name')
+ }
+ return return_value
+ return None
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 16034 denotes a user not being found.
+ if to_native(error.code) == "16034":
+ return None
+ # Error 16043 denotes the user existing, but the application missing
+ elif to_native(error.code) == "16043":
+ return None
+ else:
+ self.module.fail_json(msg='Error getting user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_user_rest(self, apps=None):
+ app_list = list()
+ if apps is not None:
+ for app in apps:
+ mydict = {
+ "application": app,
+ "authentication_methods": self.parameters['authentication_method'].split(),
+ }
+ app_list.append(mydict)
+ api = 'security/accounts'
+ params = {
+ 'name': self.parameters['name'],
+ 'role.name': self.parameters['role_name'],
+ 'applications': app_list
+ }
+ if self.parameters.get('vserver') is not None:
+ # vserver is empty for cluster
+ params['owner.name'] = self.parameters['vserver']
+ if 'set_password' in self.parameters:
+ params['password'] = self.parameters['set_password']
+ if 'lock_user' in self.parameters:
+ params['locked'] = self.parameters['lock_user']
+ dummy, error = self.rest_api.post(api, params)
+ error_sp = None
+ if error:
+ if 'invalid value' in error['message']:
+ if 'service-processor' in error['message'] or 'service_processor' in error['message']:
+ # find if there is error for service processor application value
+ # update value as per ONTAP version support
+ app_list_sp = params['applications']
+ for app_item in app_list_sp:
+ if 'service-processor' == app_item['application']:
+ app_item['application'] = 'service_processor'
+ elif 'service_processor' == app_item['application']:
+ app_item['application'] = 'service-processor'
+ params['applications'] = app_list_sp
+ # post again and report the original error if the retry also fails
+ dummy, error_sp = self.rest_api.post(api, params)
+ if error_sp:
+ self.module.fail_json(msg='Error while creating user: %s' % error)
+ return True
+
+ # non-sp errors thrown
+ if error:
+ self.module.fail_json(msg='Error while creating user: %s' % error)
+
+ def create_user(self, application):
+ """
+ creates the user for the given application and authentication_method
+ :param: application: application to grant access to
+ """
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-create', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name'],
+ 'application': application,
+ 'authentication-method': self.parameters['authentication_method'],
+ 'role-name': self.parameters.get('role_name')})
+ if self.parameters.get('set_password') is not None:
+ user_create.add_new_child('password', self.parameters.get('set_password'))
+ if self.parameters.get('authentication_method') == 'usm':
+ if self.parameters.get('remote_switch_ipaddress') is not None:
+ user_create.add_new_child('remote-switch-ipaddress', self.parameters.get('remote_switch_ipaddress'))
+ snmpv3_login_info = netapp_utils.zapi.NaElement('snmpv3-login-info')
+ if self.parameters.get('authentication_password') is not None:
+ snmpv3_login_info.add_new_child('authentication-password', self.parameters['authentication_password'])
+ if self.parameters.get('authentication_protocol') is not None:
+ snmpv3_login_info.add_new_child('authentication-protocol', self.parameters['authentication_protocol'])
+ if self.parameters.get('engine_id') is not None:
+ snmpv3_login_info.add_new_child('engine-id', self.parameters['engine_id'])
+ if self.parameters.get('privacy_password') is not None:
+ snmpv3_login_info.add_new_child('privacy-password', self.parameters['privacy_password'])
+ if self.parameters.get('privacy_protocol') is not None:
+ snmpv3_login_info.add_new_child('privacy-protocol', self.parameters['privacy_protocol'])
+ user_create.add_child_elem(snmpv3_login_info)
+
+ try:
+ self.server.invoke_successfully(user_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def lock_unlock_user_rest(self, useruuid, username, value=None):
+ data = {
+ 'locked': value
+ }
+ params = {
+ 'name': self.parameters['name'],
+ 'owner.uuid': useruuid,
+ }
+ api = "security/accounts/%s/%s" % (useruuid, username)
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error:
+ self.module.fail_json(msg='Error while locking/unlocking user: %s' % error)
+
+ def lock_given_user(self):
+ """
+ locks the user
+
+ :return:
+ True if user locked
+ False if lock user is not performed
+ :rtype: bool
+ """
+ user_lock = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-lock', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(user_lock,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error locking user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def unlock_given_user(self):
+ """
+ unlocks the user
+
+ :return:
+ True if user unlocked
+ False if unlock user is not performed
+ :rtype: bool
+ """
+ user_unlock = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-unlock', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(user_unlock,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == '13114':
+ return False
+ else:
+ self.module.fail_json(msg='Error unlocking user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return True
+
+ def delete_user_rest(self):
+ uuid, username = self.get_user_rest()
+ api = "security/accounts/%s/%s" % (uuid, username)
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg='Error while deleting user : %s' % error)
+
+ def delete_user(self, application):
+ """
+ deletes the user for the given application and authentication_method
+ :param: application: application to grant access to
+ """
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-delete', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name'],
+ 'application': application,
+ 'authentication-method': self.parameters['authentication_method']})
+
+ try:
+ self.server.invoke_successfully(user_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def is_repeated_password(message):
+ return message.startswith('New password must be different than last 6 passwords.') \
+ or message.startswith('New password must be different from last 6 passwords.') \
+ or message.startswith('New password must be different than the old password.') \
+ or message.startswith('New password must be different from the old password.')
+
+ def change_password_rest(self, useruuid, username):
+ data = {
+ 'password': self.parameters['set_password'],
+ }
+ params = {
+ 'name': self.parameters['name'],
+ 'owner.uuid': useruuid,
+ }
+ api = "security/accounts/%s/%s" % (useruuid, username)
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error:
+ if 'message' in error and self.is_repeated_password(error['message']):
+ # if the password is reused, assume idempotency
+ return False
+ else:
+ self.module.fail_json(msg='Error while updating user password: %s' % error)
+ return True
+
+ def change_password(self):
+ """
+ Changes the password
+
+ :return:
+ True if password updated
+ False if password is not updated
+ :rtype: bool
+ """
+ # self.server.set_vserver(self.parameters['vserver'])
+ modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify-password', **{
+ 'new-password': str(self.parameters.get('set_password')),
+ 'user-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(modify_password,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == '13114':
+ return False
+ # if the user gives the same password, instead of returning an error, return ok
+ if to_native(error.code) == '13214' and self.is_repeated_password(error.message):
+ return False
+ self.module.fail_json(msg='Error setting password for user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ self.server.set_vserver(None)
+ return True
+
+ def modify_apps_rest(self, useruuid, username, apps=None):
+ app_list = list()
+ if apps is not None:
+ for app in apps:
+ mydict = {
+ "application": app,
+ "authentication_methods": self.parameters['authentication_method'].split(),
+ }
+ app_list.append(mydict)
+ data = {
+ 'role.name': self.parameters['role_name'],
+ 'applications': app_list
+ }
+ params = {
+ 'name': self.parameters['name'],
+ 'owner.uuid': useruuid,
+ }
+ api = "security/accounts/%s/%s" % (useruuid, username)
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error:
+ self.module.fail_json(msg='Error while modifying user details: %s' % error)
+
+ def modify_user(self, application):
+ """
+ Modify user
+ """
+ user_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name'],
+ 'application': application,
+ 'authentication-method': self.parameters['authentication_method'],
+ 'role-name': self.parameters.get('role_name')})
+
+ try:
+ self.server.invoke_successfully(user_modify,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def change_sp_application(self, current_app):
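+ # REST and ZAPI spell the service processor application differently
+ # ('service_processor' vs 'service-processor'); align the requested spelling
+ # with whatever is currently configured so the comparison does not report a
+ # spurious difference.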
+ if 'service-processor' in self.parameters['applications'] or 'service_processor' in self.parameters['applications']:
+ if 'service-processor' in current_app:
+ if 'service_processor' in self.parameters['applications']:
+ index = self.parameters['applications'].index('service_processor')
+ self.parameters['applications'][index] = 'service-processor'
+ if 'service_processor' in current_app:
+ if 'service-processor' in self.parameters['applications']:
+ index = self.parameters['applications'].index('service-processor')
+ self.parameters['applications'][index] = 'service_processor'
+
+ def apply_for_rest(self):
+ current = self.get_user_rest()
+ if current is not None:
+ uuid, name = current
+ current = self.get_user_details_rest(name, uuid)
+ self.change_sp_application(current['applications'])
+
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify_decision = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if current and 'lock_user' not in current:
+ # REST does not return locked if password is not set
+ if cd_action is None and self.parameters.get('lock_user') is not None:
+ if self.parameters.get('set_password') is None:
+ self.module.fail_json(msg='Error: cannot modify lock state if password is not set.')
+ modify_decision['lock_user'] = self.parameters['lock_user']
+ self.na_helper.changed = True
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_user_rest(self.parameters['applications'])
+ elif cd_action == 'delete':
+ self.delete_user_rest()
+ elif modify_decision:
+ if 'role_name' in modify_decision or 'applications' in modify_decision:
+ self.modify_apps_rest(uuid, name, self.parameters['applications'])
+ if cd_action is None and self.parameters.get('set_password') is not None:
+ # if check_mode, don't attempt to change the password, but assume it would be changed
+ if self.module.check_mode or self.change_password_rest(uuid, name):
+ self.na_helper.changed = True
+ if cd_action is None and self.na_helper.changed and not self.module.check_mode:
+ # lock/unlock actions require password to be set
+ if modify_decision and 'lock_user' in modify_decision:
+ self.lock_unlock_user_rest(uuid, name, self.parameters['lock_user'])
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def apply(self):
+ if self.use_rest:
+ self.apply_for_rest()
+ else:
+ create_delete_decision = {}
+ modify_decision = {}
+ netapp_utils.ems_log_event("na_ontap_user", self.server)
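+ # With ZAPI, access is granted per (application, authentication_method) pair,
+ # so create/delete/modify decisions are evaluated for each requested application.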
+ for application in self.parameters['applications']:
+ current = self.get_user(application)
+
+ if current is not None:
+ current['lock_user'] = self.na_helper.get_value_for_bool(True, current['lock_user'])
+
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if cd_action is not None:
+ create_delete_decision[application] = cd_action
+ else:
+ modify_decision[application] = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if not create_delete_decision and self.parameters.get('state') == 'present':
+ if self.parameters.get('set_password') is not None:
+ self.na_helper.changed = True
+
+ if self.na_helper.changed:
+
+ if self.module.check_mode:
+ pass
+ else:
+ for application in create_delete_decision:
+ if create_delete_decision[application] == 'create':
+ self.create_user(application)
+ elif create_delete_decision[application] == 'delete':
+ self.delete_user(application)
+ lock_user = False
+ for application in modify_decision:
+ if 'role_name' in modify_decision[application]:
+ self.modify_user(application)
+ if 'lock_user' in modify_decision[application]:
+ lock_user = True
+ if not create_delete_decision and self.parameters.get('set_password') is not None:
+ # if change_password() returns False, nothing has changed, so changed must be reset to False
+ self.na_helper.changed = self.change_password()
+ # NOTE: unlock has to be performed after setting a password
+ if lock_user:
+ if self.parameters.get('lock_user'):
+ self.lock_given_user()
+ else:
+ self.unlock_given_user()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapUser()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py
new file mode 100644
index 00000000..206342ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_user_role
+
+short_description: NetApp ONTAP user role configuration and management
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy user roles
+
+options:
+
+ state:
+ description:
+ - Whether the specified user role should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+ type: str
+
+ command_directory_name:
+ description:
+ - The command or command directory to which the role has access.
+ required: true
+ type: str
+
+ access_level:
+ description:
+ - The access level of the role.
+ choices: ['none', 'readonly', 'all']
+ type: str
+ default: all
+
+ query:
+ description:
+ - A query for the role. The query must apply to the specified command or directory name.
+ - Use double quotes "" for modifying an existing query to none.
+ type: str
+ version_added: 2.8.0
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ type: str
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role
+ na_ontap_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: volume
+ access_level: none
+ query: show
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify User Role
+ na_ontap_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: volume
+ access_level: none
+ query: ""
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
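+ # Illustrative sketch: removing a role entry for a given command directory.
+ # The role and vserver names are placeholders.
+ - name: Delete User Role (sketch)
+ na_ontap_user_role:
+ state: absent
+ name: ansibleRole
+ command_directory_name: volume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+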
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapUserRole(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ command_directory_name=dict(required=True, type='str'),
+ access_level=dict(required=False, type='str', default='all',
+ choices=['none', 'readonly', 'all']),
+ vserver=dict(required=True, type='str'),
+ query=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_role(self):
+ """
+ Checks if the role exists for specific command-directory-name.
+
+ :return:
+ True if role found
+ False if role is not found
+ :rtype: bool
+ """
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name']}
+
+ security_login_role_get_iter = netapp_utils.zapi.NaElement(
+ 'security-login-role-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-info', **options)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_role_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(
+ security_login_role_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16031 denotes a role not being found.
+ if to_native(e.code) == "16031":
+ return None
+ # Error 16039 denotes command directory not found.
+ elif to_native(e.code) == "16039":
+ return None
+ else:
+ self.module.fail_json(msg='Error getting role %s: %s' % (self.parameters['name'], to_native(e)),
+ exception=traceback.format_exc())
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ role_info = result.get_child_by_name('attributes-list').get_child_by_name('security-login-role-info')
+ result = {
+ 'name': role_info['role-name'],
+ 'access_level': role_info['access-level'],
+ 'command_directory_name': role_info['command-directory-name'],
+ 'query': role_info['role-query']
+ }
+ return result
+
+ return None
+
+ def create_role(self):
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name'],
+ 'access-level': self.parameters['access_level']}
+ if self.parameters.get('query'):
+ options['role-query'] = self.parameters['query']
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-create', **options)
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name':
+ self.parameters['command_directory_name']})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_role(self, modify):
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name']}
+ if 'access_level' in modify.keys():
+ options['access-level'] = self.parameters['access_level']
+ if 'query' in modify.keys():
+ options['role-query'] = self.parameters['query']
+
+ role_modify = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-modify', **options)
+
+ try:
+ self.server.invoke_successfully(role_modify,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ self.asup_log_for_cserver('na_ontap_user_role')
+ current = self.get_role()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ # if the desired query is an empty string ("") and the current query is None, set the desired query to None.
+ # otherwise na_helper.get_modified_attributes will detect a change.
+ if self.parameters.get('query') == '' and current is not None:
+ if current['query'] is None:
+ self.parameters['query'] = None
+
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_role()
+ elif cd_action == 'delete':
+ self.delete_role()
+ elif modify:
+ self.modify_role(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Create an AutoSupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ netapp_utils.ems_log_event(event_name, self.server)
+
+
+def main():
+ obj = NetAppOntapUserRole()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py
new file mode 100644
index 00000000..94df84bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py
@@ -0,0 +1,2100 @@
+#!/usr/bin/python
+
+# (c) 2018-2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_volume
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_volume
+
+short_description: NetApp ONTAP manage volumes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy or modify volumes on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the existing volume to be renamed to name.
+ type: str
+ version_added: 2.7.0
+
+ is_infinite:
+ type: bool
+ description:
+ Set True if the volume is an Infinite Volume.
+ Deleting an infinite volume is asynchronous.
+
+ is_online:
+ type: bool
+ description:
+ - Whether the specified volume is online or not.
+ default: True
+
+ aggregate_name:
+ description:
+ - The name of the aggregate the flexvol should exist on.
+ - Cannot be set when using the nas_application_template option.
+ type: str
+
+ nas_application_template:
+ description:
+ - additional options when using the application/applications REST API to create a volume.
+ - the module is using ZAPI by default, and switches to REST if any suboption is present.
+ - create a FlexVol by default.
+ - create a FlexGroup if C(auto_provision_as) is set and C(FlexCache) option is not present.
+ - create a FlexCache if C(flexcache) option is present.
+ type: dict
+ version_added: 20.12.0
+ suboptions:
+ flexcache:
+ description: whether to create a flexcache. If absent, a FlexVol or FlexGroup is created.
+ type: dict
+ suboptions:
+ origin_svm_name:
+ description: the remote SVM for the flexcache.
+ type: str
+ required: true
+ origin_component_name:
+ description: the remote component for the flexcache.
+ type: str
+ required: true
+ cifs_access:
+ description:
+ - The list of CIFS access controls. You must provide I(user_or_group) or I(access) to enable CIFS access.
+ type: list
+ elements: dict
+ suboptions:
+ access:
+ description: The CIFS access granted to the user or group. Default is full_control.
+ type: str
+ choices: [change, full_control, no_access, read]
+ user_or_group:
+ description: The name of the CIFS user or group that will be granted access. Default is Everyone.
+ type: str
+ nfs_access:
+ description:
+ - The list of NFS access controls. You must provide I(host) or I(access) to enable NFS access.
+ - Mutually exclusive with export_policy option in nas_application_template.
+ type: list
+ elements: dict
+ suboptions:
+ access:
+ description: The NFS access granted. Default is rw.
+ type: str
+ choices: [none, ro, rw]
+ host:
+ description: The name of the NFS entity granted access. Default is 0.0.0.0/0.
+ type: str
+ storage_service:
+ description:
+ - The performance service level (PSL) for this volume.
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy (see C(tiering_policy) for a more complete description).
+ type: dict
+ suboptions:
+ control:
+ description: Storage tiering placement rules for the container.
+ choices: ['required', 'best_effort', 'disallowed']
+ type: str
+ policy:
+ description:
+ - Cloud tiering policy (see C(tiering_policy)).
+ - Must match C(tiering_policy) if both are present.
+ choices: ['snapshot-only', 'auto', 'backup', 'none']
+ type: str
+ object_stores:
+ description: list of object store names for tiering.
+ type: list
+ elements: str
+ use_nas_application:
+ description:
+ - Whether to use the application/applications REST/API to create a volume.
+ - This will default to true if any other suboption is present.
+ type: bool
+ default: true
+
+ size:
+ description:
+ - The size of the volume in (size_unit). Required when C(state=present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'gb'
+
+ size_change_threshold:
+ description:
+ - Percentage in size change to trigger a resize.
+ - When this parameter is greater than 0, a difference in size between what is expected and what is configured is ignored if it is below the threshold.
+ - For instance, the nas application allocates a larger size than specified to account for overhead.
+ - Set this to 0 for an exact match.
+ type: int
+ default: 10
+ version_added: 20.12.0
+
+ sizing_method:
+ description:
+ - Represents the method to modify the size of a FlexGroup.
+ - use_existing_resources - Increases or decreases the size of the FlexGroup by increasing or decreasing the size of the current FlexGroup resources.
+ - add_new_resources - Increases the size of the FlexGroup by adding new resources. This is limited to two new resources per available aggregate.
+ - This is only supported if REST is enabled (ONTAP 9.6 or later) and only for FlexGroups. ONTAP defaults to use_existing_resources.
+ type: str
+ choices: ['add_new_resources', 'use_existing_resources']
+ version_added: 20.12.0
+
+ type:
+ description:
+ - The volume type, either read-write (RW) or data-protection (DP).
+ type: str
+
+ export_policy:
+ description:
+ - Name of the export policy.
+ - Mutually exclusive with nfs_access suboption in nas_application_template.
+ type: str
+ aliases: ['policy']
+
+ junction_path:
+ description:
+ - Junction path of the volume.
+ - To unmount, use junction path C('').
+ type: str
+
+ space_guarantee:
+ description:
+ - Space guarantee style for the volume.
+ choices: ['none', 'file', 'volume']
+ type: str
+
+ percent_snapshot_space:
+ description:
+ - Amount of space reserved for snapshot copies of the volume.
+ type: int
+
+ volume_security_style:
+ description:
+ - The security style associated with this volume.
+ choices: ['mixed', 'ntfs', 'unified', 'unix']
+ type: str
+
+ encrypt:
+ type: bool
+ description:
+ - Whether or not to enable Volume Encryption.
+ default: False
+ version_added: 2.7.0
+
+ efficiency_policy:
+ description:
+ - Allows a storage efficiency policy to be set on volume creation.
+ type: str
+ version_added: 2.7.0
+
+ unix_permissions:
+ description:
+ - Unix permission bits in octal or symbolic format.
+ - For example, 0 is equivalent to ------------, 777 is equivalent to ---rwxrwxrwx, both formats are accepted.
+ - The valid octal value ranges between 0 and 777 inclusive.
+ type: str
+ version_added: 2.8.0
+
+ group_id:
+ description:
+ - The UNIX group ID for the volume. The default value is 0 ('root').
+ type: int
+ version_added: '20.1.0'
+
+ user_id:
+ description:
+ - The UNIX user ID for the volume. The default value is 0 ('root').
+ type: int
+ version_added: '20.1.0'
+
+ snapshot_policy:
+ description:
+ - The name of the snapshot policy.
+ - The default policy name is 'default'.
+ - If present, this will set the protection_type when using C(nas_application_template).
+ type: str
+ version_added: 2.8.0
+
+ aggr_list:
+ description:
+ - an array of names of aggregates to be used for FlexGroup constituents.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ aggr_list_multiplier:
+ description:
+ - The number of times to iterate over the aggregates listed with the aggr_list parameter when creating a FlexGroup.
+ type: int
+ version_added: 2.8.0
+
+ auto_provision_as:
+ description:
+ - Automatically provision a FlexGroup volume.
+ version_added: 2.8.0
+ choices: ['flexgroup']
+ type: str
+
+ snapdir_access:
+ description:
+ - This is an advanced option, the default is False.
+ - Enable the visible '.snapshot' directory that is normally present at system internal mount points.
+ - This value also turns on access to all other '.snapshot' directories in the volume.
+ type: bool
+ version_added: 2.8.0
+
+ atime_update:
+ description:
+ - This is an advanced option, the default is True.
+ - If false, prevent the update of inode access times when a file is read.
+ - This value is useful for volumes with extremely high read traffic,
+ since it prevents writes to the inode file for the volume from contending with reads from other files.
+ - This field should be used carefully.
+ - That is, use this field when you know in advance that the correct access time for inodes will not be needed for files on that volume.
+ type: bool
+ version_added: 2.8.0
+
+ wait_for_completion:
+ description:
+ - Set this parameter to 'true' for synchronous execution during create (wait until volume status is online).
+ - Set this parameter to 'false' for asynchronous execution.
+ - For asynchronous execution, the module exits as soon as the request is sent, without checking the volume status.
+ type: bool
+ default: false
+ version_added: 2.8.0
+
+ time_out:
+ description:
+ - Time to wait for FlexGroup creation, modification, or deletion in seconds.
+ - Error out if the task is not completed within the defined time.
+ - If 0, the request is asynchronous.
+ - Default is set to 3 minutes.
+ default: 180
+ type: int
+ version_added: 2.8.0
+
+ language:
+ description:
+ - Language to use for the volume.
+ - Default uses the SVM language.
+ - Possible values are listed below, as language code followed by language name.
+ - c POSIX
+ - ar Arabic
+ - cs Czech
+ - da Danish
+ - de German
+ - en English
+ - en_us English (US)
+ - es Spanish
+ - fi Finnish
+ - fr French
+ - he Hebrew
+ - hr Croatian
+ - hu Hungarian
+ - it Italian
+ - ja Japanese euc-j
+ - ja_v1 Japanese euc-j
+ - ja_jp.pck Japanese PCK (sjis)
+ - ja_jp.932 Japanese cp932
+ - ja_jp.pck_v2 Japanese PCK (sjis)
+ - ko Korean
+ - no Norwegian
+ - nl Dutch
+ - pl Polish
+ - pt Portuguese
+ - ro Romanian
+ - ru Russian
+ - sk Slovak
+ - sl Slovenian
+ - sv Swedish
+ - tr Turkish
+ - zh Simplified Chinese
+ - zh.gbk Simplified Chinese (GBK)
+ - zh_tw Traditional Chinese euc-tw
+ - zh_tw.big5 Traditional Chinese Big 5
+ - To use UTF-8 as the NFS character set, append '.UTF-8' to the language code
+ type: str
+ version_added: 2.8.0
+
+ qos_policy_group:
+ description:
+ - Specifies a QoS policy group to be set on volume.
+ type: str
+ version_added: 2.9.0
+
+ qos_adaptive_policy_group:
+ description:
+ - Specifies a QoS adaptive policy group to be set on volume.
+ type: str
+ version_added: 2.9.0
+
+ tiering_policy:
+ description:
+ - The tiering policy that is to be associated with the volume.
+ - This policy decides whether the blocks of a volume will be tiered to the capacity tier.
+ - snapshot-only policy allows tiering of only the volume snapshot copies not associated with the active file system.
+ - auto policy allows tiering of both snapshot and active file system user data to the capacity tier.
+ - backup policy on DP volumes allows all transferred user data blocks to start in the capacity tier.
+ - When set to none, the Volume blocks will not be tiered to the capacity tier.
+ - If no value specified, the volume is assigned snapshot only by default.
+ - Requires ONTAP 9.4 or later.
+ choices: ['snapshot-only', 'auto', 'backup', 'none']
+ type: str
+ version_added: 2.9.0
+
+ space_slo:
+ description:
+ - Specifies the space SLO type for the volume. The space SLO type is the Service Level Objective for space management for the volume.
+ - The space SLO value is used to enforce existing volume settings so that sufficient space is set aside on the aggregate to meet the space SLO.
+ - This parameter is not supported on Infinite Volumes.
+ choices: ['none', 'thick', 'semi-thick']
+ type: str
+ version_added: 2.9.0
+
+ nvfail_enabled:
+ description:
+ - If true, the controller performs additional work at boot and takeover times if it finds that there has been any potential data loss in the volume's
+ constituents due to an NVRAM failure.
+ - The volume's constituents would be put in a special state called 'in-nvfailed-state' such that protocol access is blocked.
+ - This will cause the client applications to crash and thus prevent access to stale data.
+ - To get out of this situation, the admin needs to manually clear the 'in-nvfailed-state' on the volume's constituents.
+ type: bool
+ version_added: 2.9.0
+
+ vserver_dr_protection:
+ description:
+ - Specifies the protection type for the volume in a Vserver DR setup.
+ choices: ['protected', 'unprotected']
+ type: str
+ version_added: 2.9.0
+
+ comment:
+ description:
+ - Sets a comment associated with the volume.
+ type: str
+ version_added: 2.9.0
+
+ snapshot_auto_delete:
+ description:
+ - A dictionary for the auto delete options and values.
+ - Supported options include 'state', 'commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
+ 'prefix', 'destroy_list'.
+ - Option 'state' determines if the snapshot autodelete is currently enabled for the volume. Possible values are 'on' and 'off'.
+ - Option 'commitment' determines the snapshots which snapshot autodelete is allowed to delete to get back space.
+ Possible values are 'try', 'disrupt' and 'destroy'.
+ - Option 'trigger' determines the condition which starts the automatic deletion of snapshots.
+ Possible values are 'volume', 'snap_reserve' and DEPRECATED 'space_reserve'.
+ - Option 'target_free_space' determines when snapshot autodelete should stop deleting snapshots. Depending on the trigger,
+ snapshots are deleted till we reach the target free space percentage. Accepts int type.
+ - Option 'delete_order' determines if the oldest or newest snapshot is deleted first. Possible values are 'newest_first' and 'oldest_first'.
+ - Option 'defer_delete' determines which kind of snapshots to delete in the end. Possible values are 'scheduled', 'user_created',
+ 'prefix' and 'none'.
+ - Option 'prefix' can be set to provide the prefix string for the 'prefix' value of the 'defer_delete' option.
+ The prefix string length can be 15 char long.
+ - Option 'destroy_list' is a comma separated list of services which can be destroyed if the snapshot backing that service is deleted.
+ For 7-mode, the possible values for this option are a combination of 'lun_clone', 'vol_clone', 'cifs_share', 'file_clone' or 'none'.
+ For cluster-mode, the possible values for this option are a combination of 'lun_clone,file_clone' (for LUN clone and/or file clone),
+ 'lun_clone,sfsr' (for LUN clone and/or sfsr), 'vol_clone', 'cifs_share', or 'none'.
+ type: dict
+ version_added: '20.4.0'
+
+ cutover_action:
+ description:
+ - Specifies the action to be taken for cutover.
+ - Possible values are 'abort_on_failure', 'defer_on_failure', 'force' and 'wait'. Default is 'defer_on_failure'.
+ choices: ['abort_on_failure', 'defer_on_failure', 'force', 'wait']
+ type: str
+ version_added: '20.5.0'
+
+ check_interval:
+ description:
+ - The amount of time in seconds to wait between checks of a volume to see if it has moved successfully.
+ default: 30
+ type: int
+ version_added: '20.6.0'
+
+ from_vserver:
+ description:
+ - The source vserver from which the volume is rehosted.
+ type: str
+ version_added: '20.6.0'
+
+ auto_remap_luns:
+ description:
+ - Flag to control automatic map of LUNs.
+ type: bool
+ version_added: '20.6.0'
+
+ force_unmap_luns:
+ description:
+ - Flag to control automatic unmap of LUNs.
+ type: bool
+ version_added: '20.6.0'
+
+ force_restore:
+ description:
+ - If this field is set to "true", the Snapshot copy is restored even if the volume has one or more newer Snapshot
+ copies which are currently used as reference Snapshot copy by SnapMirror. If a restore is done in this
+ situation, this will cause future SnapMirror transfers to fail.
+ - Option should only be used along with snapshot_restore.
+ type: bool
+ version_added: '20.6.0'
+
+ preserve_lun_ids:
+ description:
+ - If this field is set to "true", LUNs in the volume being restored will remain mapped and their identities
+ preserved such that host connectivity will not be disrupted during the restore operation. I/O's to the LUN will
+ be fenced during the restore operation by placing the LUNs in an unavailable state. Once the restore operation
+ has completed, hosts will be able to resume I/O access to the LUNs.
+ - Option should only be used along with snapshot_restore.
+ type: bool
+ version_added: '20.6.0'
+
+ snapshot_restore:
+ description:
+ - Name of snapshot to restore from.
+ - Not supported on Infinite Volume.
+ type: str
+ version_added: '20.6.0'
+
+ compression:
+ description:
+ - Whether to enable compression for the volume (HDD and Flash Pool aggregates).
+ - If this option is not present, it is automatically set to true if inline_compression is true.
+ type: bool
+ version_added: '20.12.0'
+
+ inline_compression:
+ description:
+ - Whether to enable inline compression for the volume (HDD and Flash Pool aggregates, AFF platforms).
+ type: bool
+ version_added: '20.12.0'
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume12
+ is_infinite: False
+ aggregate_name: ansible_aggr
+ size: 100
+ size_unit: mb
+ user_id: 1001
+ group_id: 2002
+ space_guarantee: none
+ tiering_policy: auto
+ export_policy: default
+ percent_snapshot_space: 60
+ qos_policy_group: max_performance_gold
+ vserver: ansibleVServer
+ wait_for_completion: True
+ space_slo: none
+ nvfail_enabled: False
+ comment: ansible created volume
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Volume Delete
+ na_ontap_volume:
+ state: absent
+ name: ansibleVolume12
+ aggregate_name: ansible_aggr
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Make FlexVol offline
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ is_online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create flexGroup volume manually
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ aggr_list: "{{ aggr_list }}"
+ aggr_list_multiplier: 2
+ size: 200
+ size_unit: mb
+ space_guarantee: none
+ export_policy: default
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ unix_permissions: 777
+ snapshot_policy: default
+ time_out: 0
+
+ - name: Create FlexGroup volume with auto provision as flexgroup
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ auto_provision_as: flexgroup
+ size: 200
+ size_unit: mb
+ space_guarantee: none
+ export_policy: default
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ unix_permissions: 777
+ snapshot_policy: default
+ time_out: 0
+
+ - name: Create FlexVol with QoS adaptive
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume15
+ is_infinite: False
+ aggregate_name: ansible_aggr
+ size: 100
+ size_unit: gb
+ space_guarantee: none
+ export_policy: default
+ percent_snapshot_space: 10
+ qos_adaptive_policy_group: extreme
+ vserver: ansibleVServer
+ wait_for_completion: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify volume dr protection (vserver of the volume must be in a snapmirror relationship)
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ vserver_dr_protection: protected
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+
+ - name: Modify volume with snapshot auto delete options
+ na_ontap_volume:
+ state: present
+ name: vol_auto_delete
+ snapshot_auto_delete:
+ state: "on"
+ commitment: try
+ defer_delete: scheduled
+ target_free_space: 30
+ destroy_list: lun_clone,vol_clone
+ delete_order: newest_first
+ aggregate_name: "{{ aggr }}"
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+
+ - name: Move volume with force cutover action
+ na_ontap_volume:
+ name: ansible_vol
+ aggregate_name: aggr_ansible
+ cutover_action: force
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Rehost volume to another vserver auto remap luns
+ na_ontap_volume:
+ name: ansible_vol
+ from_vserver: ansible
+ auto_remap_luns: true
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Rehost volume to another vserver force unmap luns
+ na_ontap_volume:
+ name: ansible_vol
+ from_vserver: ansible
+ force_unmap_luns: true
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Snapshot restore volume
+ na_ontap_volume:
+ name: ansible_vol
+ vserver: ansible
+ snapshot_restore: 2020-05-24-weekly
+ force_restore: true
+ preserve_lun_ids: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+
+ - name: Volume create using application/applications nas template
+ na_ontap_volume:
+ state: present
+ name: ansibleVolume12
+ vserver: ansibleSVM
+ size: 100000000
+ size_unit: b
+ space_guarantee: none
+ language: es
+ percent_snapshot_space: 60
+ unix_permissions: ---rwxrwxrwx
+ snapshot_policy: default
+ efficiency_policy: default
+ comment: testing
+ nas_application_template:
+ nfs_access: # the mere presence of a suboption is enough to enable this new feature
+ - access: ro
+ - access: rw
+ host: 10.0.0.0/8
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
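+
+ # Illustrative sketch: resizing an existing FlexGroup through REST using a
+ # sizing method. The volume and vserver names are placeholders.
+ - name: Resize FlexGroup by adding new resources (sketch)
+ na_ontap_volume:
+ state: present
+ name: ansibleFlexGroup
+ size: 400
+ size_unit: gb
+ sizing_method: add_new_resources
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false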
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVolume(object):
+ '''Class with volume operations'''
+
+ def __init__(self):
+ '''Initialize module parameters'''
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ is_infinite=dict(required=False, type='bool', default=False),
+ is_online=dict(required=False, type='bool', default=True),
+ size=dict(type='int', default=None),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
+ sizing_method=dict(choices=['add_new_resources', 'use_existing_resources'], type='str'),
+ aggregate_name=dict(type='str', default=None),
+ type=dict(type='str', default=None),
+ export_policy=dict(type='str', default=None, aliases=['policy']),
+ junction_path=dict(type='str', default=None),
+ space_guarantee=dict(choices=['none', 'file', 'volume'], default=None),
+ percent_snapshot_space=dict(type='int', default=None),
+ volume_security_style=dict(choices=['mixed', 'ntfs', 'unified', 'unix']),
+ encrypt=dict(required=False, type='bool', default=False),
+ efficiency_policy=dict(required=False, type='str'),
+ unix_permissions=dict(required=False, type='str'),
+ group_id=dict(required=False, type='int'),
+ user_id=dict(required=False, type='int'),
+ snapshot_policy=dict(required=False, type='str'),
+ aggr_list=dict(required=False, type='list', elements='str'),
+ aggr_list_multiplier=dict(required=False, type='int'),
+ snapdir_access=dict(required=False, type='bool'),
+ atime_update=dict(required=False, type='bool'),
+ auto_provision_as=dict(choices=['flexgroup'], required=False, type='str'),
+ wait_for_completion=dict(required=False, type='bool', default=False),
+ time_out=dict(required=False, type='int', default=180),
+ language=dict(type='str', required=False),
+ qos_policy_group=dict(required=False, type='str'),
+ qos_adaptive_policy_group=dict(required=False, type='str'),
+ nvfail_enabled=dict(type='bool', required=False),
+ space_slo=dict(type='str', required=False, choices=['none', 'thick', 'semi-thick']),
+ tiering_policy=dict(type='str', required=False, choices=['snapshot-only', 'auto', 'backup', 'none']),
+ vserver_dr_protection=dict(type='str', required=False, choices=['protected', 'unprotected']),
+ comment=dict(type='str', required=False),
+ snapshot_auto_delete=dict(type='dict', required=False),
+ cutover_action=dict(required=False, type='str', choices=['abort_on_failure', 'defer_on_failure', 'force', 'wait']),
+ check_interval=dict(required=False, type='int', default=30),
+ from_vserver=dict(required=False, type='str'),
+ auto_remap_luns=dict(required=False, type='bool'),
+ force_unmap_luns=dict(required=False, type='bool'),
+ force_restore=dict(required=False, type='bool'),
+ compression=dict(required=False, type='bool'),
+ inline_compression=dict(required=False, type='bool'),
+ preserve_lun_ids=dict(required=False, type='bool'),
+ snapshot_restore=dict(required=False, type='str'),
+ nas_application_template=dict(type='dict', options=dict(
+ use_nas_application=dict(type='bool', default=True),
+ flexcache=dict(type='dict', options=dict(
+ origin_svm_name=dict(required=True, type='str'),
+ origin_component_name=dict(required=True, type='str')
+ )),
+ cifs_access=dict(type='list', elements='dict', options=dict(
+ access=dict(type='str', choices=['change', 'full_control', 'no_access', 'read']),
+ user_or_group=dict(type='str')
+ )),
+ nfs_access=dict(type='list', elements='dict', options=dict(
+ access=dict(type='str', choices=['none', 'ro', 'rw']),
+ host=dict(type='str')
+ )),
+ storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
+ tiering=dict(type='dict', options=dict(
+ control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
+ policy=dict(type='str', choices=['snapshot-only', 'auto', 'backup', 'none']),
+ object_stores=dict(type='list', elements='str') # create only
+ ))
+ )),
+ size_change_threshold=dict(type='int', default=10),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ mutually_exclusive=[
+ ['space_guarantee', 'space_slo'], ['auto_remap_luns', 'force_unmap_luns']
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.volume_style = None
+ self.warnings = list()
+ self.sis_keys2zapi_get = dict(
+ efficiency_policy='policy',
+ compression='is-compression-enabled',
+ inline_compression='is-inline-compression-enabled')
+ self.sis_keys2zapi_set = dict(
+ efficiency_policy='policy-name',
+ compression='enable-compression',
+ inline_compression='enable-inline-compression')
+
+ if self.parameters.get('size'):
+ self.parameters['size'] = self.parameters['size'] * \
+ self._size_unit_map[self.parameters['size_unit']]
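+ # For example, size=20 with the default size_unit='gb' is converted to 20 * 1024**3 = 21474836480 bytes.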
+ if 'snapshot_auto_delete' in self.parameters:
+ for key in self.parameters['snapshot_auto_delete']:
+ if key not in ['commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
+ 'prefix', 'destroy_list', 'state']:
+ self.module.fail_json(msg="snapshot_auto_delete option '%s' is not valid." % key)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.parameters['vserver'])
+ self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ # REST API for application/applications if needed
+ self.rest_api, self.rest_app = self.setup_rest_application()
+
+ def setup_rest_application(self):
+ use_application_template = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'use_nas_application'])
+ rest_api, rest_app = None, None
+ if use_application_template:
+ # consistency checks
+ # tiering policy can be set in two places, make sure the values match
+ tiering_policy_nas = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'policy'])
+ tiering_policy = self.na_helper.safe_get(self.parameters, ['tiering_policy'])
+ if tiering_policy_nas is not None and tiering_policy is not None and tiering_policy_nas != tiering_policy:
+ msg = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.'
+ msg += ' Found "%s" and "%s".' % (tiering_policy, tiering_policy_nas)
+ self.module.fail_json(msg=msg)
+ # aggregate_name will force a move if present
+ if self.parameters.get('aggregate_name') is not None:
+ msg = 'Conflict: aggregate_name is not supported when application template is enabled.'\
+ ' Found: aggregate_name: %s' % self.parameters['aggregate_name']
+ self.module.fail_json(msg=msg)
+ nfs_access = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'nfs_access'])
+ if nfs_access is not None and self.na_helper.safe_get(self.parameters, ['export_policy']) is not None:
+ msg = 'Conflict: export_policy option and nfs_access suboption in nas_application_template are mutually exclusive.'
+ self.module.fail_json(msg=msg)
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ rest_app = RestApplication(rest_api, self.parameters['vserver'], self.parameters['name'])
+ return rest_api, rest_app
+
+ def volume_get_iter(self, vol_name=None):
+ """
+ Return volume-get-iter query results
+ :param vol_name: name of the volume
+ :return: NaElement
+ """
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', vol_name)
+ volume_id_attributes.add_new_child('vserver', self.parameters['vserver'])
+ volume_attributes.add_child_elem(volume_id_attributes)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+ volume_info.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(volume_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return result
+
+ def get_volume(self, vol_name=None):
+ """
+ Return details about the volume
+ :param vol_name: name of the volume; defaults to self.parameters['name'] when not provided.
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ if vol_name is None:
+ vol_name = self.parameters['name']
+ volume_get_iter = self.volume_get_iter(vol_name)
+ return_value = None
+ if volume_get_iter.get_child_by_name('num-records') and \
+ int(volume_get_iter.get_child_content('num-records')) > 0:
+
+ volume_attributes = volume_get_iter['attributes-list']['volume-attributes']
+ volume_space_attributes = volume_attributes['volume-space-attributes']
+ volume_state_attributes = volume_attributes['volume-state-attributes']
+ volume_id_attributes = volume_attributes['volume-id-attributes']
+ try:
+ volume_export_attributes = volume_attributes['volume-export-attributes']
+ except KeyError: # does not exist for MDV volumes
+ volume_export_attributes = None
+ volume_security_unix_attributes = self.na_helper.safe_get(volume_attributes,
+ ['volume-security-attributes', 'volume-security-unix-attributes'],
+ allow_sparse_dict=False)
+ volume_snapshot_attributes = volume_attributes['volume-snapshot-attributes']
+ volume_performance_attributes = volume_attributes['volume-performance-attributes']
+ volume_snapshot_auto_delete_attributes = volume_attributes['volume-snapshot-autodelete-attributes']
+ try:
+ volume_comp_aggr_attributes = volume_attributes['volume-comp-aggr-attributes']
+ except KeyError: # Not supported in 9.1 to 9.3
+ volume_comp_aggr_attributes = None
+ # Get volume's state (online/offline)
+ current_state = volume_state_attributes['state']
+ is_online = (current_state == "online")
+
+ return_value = {
+ 'name': vol_name,
+ 'size': int(volume_space_attributes['size']),
+ 'is_online': is_online,
+ 'unix_permissions': volume_security_unix_attributes['permissions']
+ }
+ if volume_snapshot_attributes.get_child_by_name('snapshot-policy'):
+ return_value['snapshot_policy'] = volume_snapshot_attributes['snapshot-policy']
+ if volume_export_attributes is not None:
+ return_value['export_policy'] = volume_export_attributes['policy']
+ else:
+ return_value['export_policy'] = None
+ if volume_security_unix_attributes.get_child_by_name('group-id'):
+ return_value['group_id'] = int(volume_security_unix_attributes['group-id'])
+ if volume_security_unix_attributes.get_child_by_name('user-id'):
+ return_value['user_id'] = int(volume_security_unix_attributes['user-id'])
+ if volume_comp_aggr_attributes is not None:
+ return_value['tiering_policy'] = volume_comp_aggr_attributes['tiering-policy']
+ if volume_space_attributes.get_child_by_name('encrypt'):
+ return_value['encrypt'] = self.na_helper.get_value_for_bool(True, volume_space_attributes['encrypt'], 'encrypt')
+ if volume_space_attributes.get_child_by_name('percentage-snapshot-reserve'):
+ return_value['percent_snapshot_space'] = int(volume_space_attributes['percentage-snapshot-reserve'])
+ if volume_id_attributes.get_child_by_name('type'):
+ return_value['type'] = volume_id_attributes['type']
+ if volume_space_attributes.get_child_by_name('space-slo'):
+ return_value['space_slo'] = volume_space_attributes['space-slo']
+ else:
+ return_value['space_slo'] = None
+ if volume_state_attributes.get_child_by_name('is-nvfail-enabled'):
+ return_value['nvfail_enabled'] = self.na_helper.get_value_for_bool(True, volume_state_attributes['is-nvfail-enabled'], 'is-nvfail-enabled')
+ else:
+ return_value['nvfail_enabled'] = None
+ if volume_id_attributes.get_child_by_name('containing-aggregate-name'):
+ return_value['aggregate_name'] = volume_id_attributes['containing-aggregate-name']
+ else:
+ return_value['aggregate_name'] = None
+ if volume_id_attributes.get_child_by_name('junction-path'):
+ return_value['junction_path'] = volume_id_attributes['junction-path']
+ else:
+ return_value['junction_path'] = ''
+ if volume_id_attributes.get_child_by_name('comment'):
+ return_value['comment'] = volume_id_attributes['comment']
+ else:
+ return_value['comment'] = None
+ return_value['uuid'] = self.na_helper.safe_get(volume_id_attributes, ['instance-uuid'])
+ if volume_attributes['volume-security-attributes'].get_child_by_name('style'):
+ # style is not present if the volume is still offline or of type: dp
+ return_value['volume_security_style'] = volume_attributes['volume-security-attributes']['style']
+ if volume_id_attributes.get_child_by_name('style-extended'):
+ return_value['style_extended'] = volume_id_attributes['style-extended']
+ else:
+ return_value['style_extended'] = None
+ if volume_space_attributes.get_child_by_name('space-guarantee'):
+ return_value['space_guarantee'] = volume_space_attributes['space-guarantee']
+ else:
+ return_value['space_guarantee'] = None
+ if volume_snapshot_attributes.get_child_by_name('snapdir-access-enabled'):
+ return_value['snapdir_access'] = self.na_helper.get_value_for_bool(True,
+ volume_snapshot_attributes['snapdir-access-enabled'],
+ 'snapdir-access-enabled')
+ else:
+ return_value['snapdir_access'] = None
+ if volume_performance_attributes.get_child_by_name('is-atime-update-enabled'):
+ return_value['atime_update'] = self.na_helper.get_value_for_bool(True,
+ volume_performance_attributes['is-atime-update-enabled'],
+ 'is-atime-update-enabled')
+ else:
+ return_value['atime_update'] = None
+ if volume_attributes.get_child_by_name('volume-qos-attributes'):
+ volume_qos_attributes = volume_attributes['volume-qos-attributes']
+ if volume_qos_attributes.get_child_by_name('policy-group-name'):
+ return_value['qos_policy_group'] = volume_qos_attributes['policy-group-name']
+ else:
+ return_value['qos_policy_group'] = None
+ if volume_qos_attributes.get_child_by_name('adaptive-policy-group-name'):
+ return_value['qos_adaptive_policy_group'] = volume_qos_attributes['adaptive-policy-group-name']
+ else:
+ return_value['qos_adaptive_policy_group'] = None
+ else:
+ return_value['qos_policy_group'] = None
+ return_value['qos_adaptive_policy_group'] = None
+ if volume_attributes.get_child_by_name('volume-vserver-dr-protection-attributes'):
+ volume_vserver_dr_protection_attributes = volume_attributes['volume-vserver-dr-protection-attributes']
+ if volume_vserver_dr_protection_attributes.get_child_by_name('vserver-dr-protection'):
+ return_value['vserver_dr_protection'] = volume_vserver_dr_protection_attributes['vserver-dr-protection']
+ else:
+ return_value['vserver_dr_protection'] = None
+ # snapshot_auto_delete options
+ auto_delete = dict()
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('commitment'):
+ auto_delete['commitment'] = volume_snapshot_auto_delete_attributes['commitment']
+ else:
+ auto_delete['commitment'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('defer-delete'):
+ auto_delete['defer_delete'] = volume_snapshot_auto_delete_attributes['defer-delete']
+ else:
+ auto_delete['defer_delete'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('delete-order'):
+ auto_delete['delete_order'] = volume_snapshot_auto_delete_attributes['delete-order']
+ else:
+ auto_delete['delete_order'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('destroy-list'):
+ auto_delete['destroy_list'] = volume_snapshot_auto_delete_attributes['destroy-list']
+ else:
+ auto_delete['destroy_list'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('is-autodelete-enabled'):
+ if self.na_helper.get_value_for_bool(True, volume_snapshot_auto_delete_attributes['is-autodelete-enabled'], 'is-autodelete-enabled'):
+ auto_delete['state'] = 'on'
+ else:
+ auto_delete['state'] = 'off'
+ else:
+ auto_delete['is_autodelete_enabled'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('prefix'):
+ auto_delete['prefix'] = volume_snapshot_auto_delete_attributes['prefix']
+ else:
+ auto_delete['prefix'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('target-free-space'):
+ auto_delete['target_free_space'] = int(volume_snapshot_auto_delete_attributes['target-free-space'])
+ else:
+ auto_delete['target_free_space'] = None
+ if volume_snapshot_auto_delete_attributes.get_child_by_name('trigger'):
+ auto_delete['trigger'] = volume_snapshot_auto_delete_attributes['trigger']
+ else:
+ auto_delete['trigger'] = None
+ return_value['snapshot_auto_delete'] = auto_delete
+ self.get_efficiency_info(return_value)
+
+ return return_value
+
+ def fail_on_error(self, error, api=None, stack=False):
+ if error is None:
+ return
+ if api is not None:
+ error = 'calling api: %s: %s' % (api, error)
+ results = dict(msg="Error: %s" % error)
+ if stack:
+ results['stack'] = traceback.format_stack()
+ self.module.fail_json(**results)
+
+ def create_nas_application_component(self):
+ '''Create application component for nas template'''
+ required_options = ('name', 'size')
+ for option in required_options:
+ if self.parameters.get(option) is None:
+ self.module.fail_json(msg='Error: "%s" is required to create nas application.' % option)
+
+ application_component = dict(
+ name=self.parameters['name'],
+ total_size=self.parameters['size'],
+ share_count=1, # 1 is the maximum value for nas
+ scale_out=(self.volume_style == 'flexGroup'),
+ )
+ name = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'storage_service'])
+ if name is not None:
+ application_component['storage_service'] = dict(name=name)
+
+ flexcache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache'])
+ if flexcache is not None:
+ application_component['flexcache'] = dict(
+ origin=dict(
+ svm=dict(name=flexcache['origin_svm_name']),
+ component=dict(name=flexcache['origin_component_name'])
+ )
+ )
+
+ tiering = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering'])
+ if tiering is not None or self.parameters.get('tiering_policy') is not None:
+ application_component['tiering'] = dict()
+ if tiering is None:
+ tiering = dict()
+ if 'policy' not in tiering:
+ tiering['policy'] = self.parameters.get('tiering_policy')
+ for attr in ('control', 'policy', 'object_stores'):
+ value = tiering.get(attr)
+ if attr == 'object_stores' and value is not None:
+ value = [dict(name=x) for x in value]
+ if value is not None:
+ application_component['tiering'][attr] = value
+ if self.parameters.get('qos_policy') is not None:
+ application_component['qos'] = {
+ "policy": {
+ "name": self.parameters['qos_policy'],
+ }
+ }
+ if self.parameters.get('export_policy') is not None:
+ application_component['export_policy'] = {
+ "name": self.parameters['export_policy'],
+ }
+ return application_component
+
+ def create_volume_body(self):
+ '''Create body for nas template'''
+ nas = dict(application_components=[self.create_nas_application_component()])
+ value = self.na_helper.safe_get(self.parameters, ['snapshot_policy'])
+ if value is not None:
+ nas['protection_type'] = dict(local_policy=value)
+ for attr in ('nfs_access', 'cifs_access'):
+ value = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr])
+ if value is not None:
+ # we expect value to be a list of dicts, with maybe some empty entries
+ value = self.na_helper.filter_out_none_entries(value)
+ if value:
+ nas[attr] = value
+ return self.rest_app.create_application_body("nas", nas)
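+ # Illustrative shape of the 'nas' dict built above (before create_application_body wraps it), assuming a
+ # hypothetical 20gb flexvol named 'vol1' with snapshot_policy=default and a single rw nfs_access entry:
+ # {'application_components': [{'name': 'vol1', 'total_size': 21474836480, 'share_count': 1, 'scale_out': False}],
+ #  'protection_type': {'local_policy': 'default'}, 'nfs_access': [{'access': 'rw', 'host': '10.0.0.0/8'}]}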
+
+ def create_nas_application(self):
+ '''Use REST application/applications nas template to create a volume'''
+ body, error = self.create_volume_body()
+ self.fail_on_error(error)
+ response, error = self.rest_app.create_application(body)
+ self.fail_on_error(error)
+ return response
+
+ def create_volume(self):
+ '''Create ONTAP volume'''
+ if self.rest_app:
+ return self.create_nas_application()
+ if self.volume_style == 'flexGroup':
+ return self.create_volume_async()
+
+ options = self.create_volume_options()
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create', **options)
+ try:
+ self.server.invoke_successfully(volume_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
+ self.module.fail_json(msg='Error provisioning volume %s%s: %s'
+ % (self.parameters['name'], size_msg, to_native(error)),
+ exception=traceback.format_exc())
+ self.ems_log_event("volume-create")
+
+ if self.parameters.get('wait_for_completion'):
+ # round off time_out
+ retries = (self.parameters['time_out'] + 5) // 10
+ is_online = None
+ errors = list()
+ while not is_online and retries > 0:
+ try:
+ current = self.get_volume()
+ is_online = None if current is None else current['is_online']
+ except KeyError as err:
+ # get_volume may receive incomplete data as the volume is being created
+ errors.append(repr(err))
+ if not is_online:
+ time.sleep(10)
+ retries = retries - 1
+ if not is_online:
+ errors.append("Timeout after %s seconds" % self.parameters['time_out'])
+ self.module.fail_json(msg='Error waiting for volume %s to come online: %s'
+ % (self.parameters['name'], str(errors)))
+ return None
+
+ def create_volume_async(self):
+ '''
+ Create the volume using the asynchronous ZAPI (volume-create-async), used for FlexGroup volumes.
+ '''
+ options = self.create_volume_options()
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create-async', **options)
+ if self.parameters.get('aggr_list'):
+ aggr_list_obj = netapp_utils.zapi.NaElement('aggr-list')
+ volume_create.add_child_elem(aggr_list_obj)
+ for aggr in self.parameters['aggr_list']:
+ aggr_list_obj.add_new_child('aggr-name', aggr)
+ try:
+ result = self.server.invoke_successfully(volume_create, enable_tunneling=True)
+ self.ems_log_event("volume-create")
+ except netapp_utils.zapi.NaApiError as error:
+ size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
+ self.module.fail_json(msg='Error provisioning volume %s%s: %s'
+ % (self.parameters['name'], size_msg, to_native(error)),
+ exception=traceback.format_exc())
+ self.check_invoke_result(result, 'create')
+ return None
+
+ def create_volume_options(self):
+ '''Set volume options for create operation'''
+ options = {}
+ if self.volume_style == 'flexGroup':
+ options['volume-name'] = self.parameters['name']
+ if self.parameters.get('aggr_list_multiplier') is not None:
+ options['aggr-list-multiplier'] = str(self.parameters['aggr_list_multiplier'])
+ if self.parameters.get('auto_provision_as') is not None:
+ options['auto-provision-as'] = self.parameters['auto_provision_as']
+ if self.parameters.get('space_guarantee') is not None:
+ options['space-guarantee'] = self.parameters['space_guarantee']
+ else:
+ options['volume'] = self.parameters['name']
+ if self.parameters.get('aggregate_name') is None:
+ self.module.fail_json(msg='Error provisioning volume %s: aggregate_name is required'
+ % self.parameters['name'])
+ options['containing-aggr-name'] = self.parameters['aggregate_name']
+ if self.parameters.get('space_guarantee') is not None:
+ options['space-reserve'] = self.parameters['space_guarantee']
+
+ if self.parameters.get('size') is not None:
+ options['size'] = str(self.parameters['size'])
+ if self.parameters.get('snapshot_policy') is not None:
+ options['snapshot-policy'] = self.parameters['snapshot_policy']
+ if self.parameters.get('unix_permissions') is not None:
+ options['unix-permissions'] = self.parameters['unix_permissions']
+ if self.parameters.get('group_id') is not None:
+ options['group-id'] = str(self.parameters['group_id'])
+ if self.parameters.get('user_id') is not None:
+ options['user-id'] = str(self.parameters['user_id'])
+ if self.parameters.get('volume_security_style') is not None:
+ options['volume-security-style'] = self.parameters['volume_security_style']
+ if self.parameters.get('export_policy') is not None:
+ options['export-policy'] = self.parameters['export_policy']
+ if self.parameters.get('junction_path') is not None:
+ options['junction-path'] = self.parameters['junction_path']
+ if self.parameters.get('comment') is not None:
+ options['volume-comment'] = self.parameters['comment']
+ if self.parameters.get('type') is not None:
+ options['volume-type'] = self.parameters['type']
+ if self.parameters.get('percent_snapshot_space') is not None:
+ options['percentage-snapshot-reserve'] = str(self.parameters['percent_snapshot_space'])
+ if self.parameters.get('language') is not None:
+ options['language-code'] = self.parameters['language']
+ if self.parameters.get('qos_policy_group') is not None:
+ options['qos-policy-group-name'] = self.parameters['qos_policy_group']
+ if self.parameters.get('qos_adaptive_policy_group') is not None:
+ options['qos-adaptive-policy-group-name'] = self.parameters['qos_adaptive_policy_group']
+ if self.parameters.get('nvfail_enabled') is not None:
+ options['is-nvfail-enabled'] = str(self.parameters['nvfail_enabled'])
+ if self.parameters.get('space_slo') is not None:
+ options['space-slo'] = self.parameters['space_slo']
+ if self.parameters.get('tiering_policy') is not None:
+ options['tiering-policy'] = self.parameters['tiering_policy']
+ if self.parameters.get('encrypt') is not None:
+ options['encrypt'] = self.na_helper.get_value_for_bool(False, self.parameters['encrypt'], 'encrypt')
+ if self.parameters.get('vserver_dr_protection') is not None:
+ options['vserver-dr-protection'] = self.parameters['vserver_dr_protection']
+ if self.parameters['is_online']:
+ options['volume-state'] = 'online'
+ else:
+ options['volume-state'] = 'offline'
+ return options
+
+ def delete_volume(self, current):
+ '''Delete ONTAP volume'''
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexGroup':
+ if current['is_online']:
+ self.change_volume_state(call_from_delete_vol=True)
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.parameters['name']})
+ else:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **{'name': self.parameters['name'], 'unmount-and-offline': 'true'})
+ try:
+ result = self.server.invoke_successfully(volume_delete, enable_tunneling=True)
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexGroup':
+ self.check_invoke_result(result, 'delete')
+ self.ems_log_event("volume-delete")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def move_volume(self):
+ '''Move volume from source aggregate to destination aggregate'''
+ volume_move = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-move-start', **{'source-volume': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ 'dest-aggr': self.parameters['aggregate_name']})
+ if self.parameters.get('cutover_action'):
+ volume_move.add_new_child('cutover-action', self.parameters['cutover_action'])
+ try:
+ self.cluster.invoke_successfully(volume_move,
+ enable_tunneling=True)
+ self.ems_log_event("volume-move")
+ except netapp_utils.zapi.NaApiError as error:
+ if not self.move_volume_with_rest_passthrough():
+ self.module.fail_json(msg='Error moving volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def move_volume_with_rest_passthrough(self):
+ # MDV volumes fail on a ZAPI move, but the move works using the REST CLI passthrough, e.g.:
+ # vol move start -volume MDV_CRS_d6b0b313ff5611e9837100a098544e51_A -destination-aggregate data_a3 -vserver wmc66-a
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ use_rest = rest_api.is_rest()
+ # if REST isn't available fail with the original error
+ if not use_rest:
+ return False
+ # if REST exists let's try moving using the passthrough CLI
+ api = 'private/cli/volume/move/start'
+ data = {'volume': self.parameters['name'],
+ 'destination-aggregate': self.parameters['aggregate_name'],
+ 'vserver': self.parameters['vserver']}
+ dummy, error = rest_api.patch(api, data)
+ if error is not None:
+ self.module.fail_json(msg='Error moving volume %s: %s' % (self.parameters['name'], error))
+ return True
+
+ def wait_for_volume_move(self):
+ waiting = True
+ fail_count = 0
+ while waiting:
+ volume_move_iter = netapp_utils.zapi.NaElement('volume-move-get-iter')
+ volume_move_info = netapp_utils.zapi.NaElement('volume-move-info')
+ volume_move_info.add_new_child('volume', self.parameters['name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_move_info)
+ volume_move_iter.add_child_elem(query)
+ try:
+ result = self.cluster.invoke_successfully(volume_move_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if fail_count < 3:
+ fail_count += 1
+ time.sleep(self.parameters['check_interval'])
+ continue
+ self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+ # reset fail count to 0
+ fail_count = 0
+ volume_move_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info').\
+ get_child_content('state')
+ # We have 5 states that can be returned.
+ # 'warning' and 'healthy' are states where the move is still in progress, so nothing needs to be done for those.
+ if volume_move_status == 'done':
+ waiting = False
+ if volume_move_status in ['failed', 'alert']:
+ self.module.fail_json(msg='Error moving volume %s: %s' %
+ (self.parameters['name'],
+ result.get_child_by_name('attributes-list')[0].get_child_by_name('details')))
+ time.sleep(self.parameters['check_interval'])
+
+ def rename_volume(self):
+ """
+ Rename the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume. Use time_out parameter to set wait time for rename completion.
+ """
+ vol_rename_zapi, vol_name_zapi = ['volume-rename-async', 'volume-name'] if self.parameters['is_infinite']\
+ else ['volume-rename', 'volume']
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ vol_rename_zapi, **{vol_name_zapi: self.parameters['from_name'],
+ 'new-volume-name': str(self.parameters['name'])})
+ try:
+ result = self.server.invoke_successfully(volume_rename, enable_tunneling=True)
+ if vol_rename_zapi == 'volume-rename-async':
+ self.check_invoke_result(result, 'rename')
+ self.ems_log_event("volume-rename")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rest_resize_volume(self):
+ """
+ Re-size the volume using REST PATCH method.
+ """
+ uuid = self.parameters['uuid']
+ if uuid is None:
+ self.module.fail_json(msg='Could not read UUID for volume %s' % self.parameters['name'])
+ api = '/storage/volumes/%s' % uuid
+ body = dict(size=self.parameters['size'])
+ query = dict(sizing_method=self.parameters['sizing_method'])
+ rest_api = netapp_utils.OntapRestAPI(self.module)
+ response, error = rest_api.patch(api, body, query)
+ self.fail_on_error(error, api)
+ return response
+
+ def resize_volume(self):
+ """
+ Re-size the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to resize an
+ Infinite Volume.
+ """
+ if self.parameters.get('sizing_method') is not None:
+ return self.rest_resize_volume()
+
+ vol_size_zapi, vol_name_zapi = ['volume-size-async', 'volume-name']\
+ if (self.parameters['is_infinite'] or self.volume_style == 'flexGroup')\
+ else ['volume-size', 'volume']
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ vol_size_zapi, **{vol_name_zapi: self.parameters['name'],
+ 'new-size': str(self.parameters['size'])})
+ try:
+ result = self.server.invoke_successfully(volume_resize, enable_tunneling=True)
+ if vol_size_zapi == 'volume-size-async':
+ self.check_invoke_result(result, 'resize')
+ self.ems_log_event("volume-resize")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error re-sizing volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return None
+
+ def change_volume_state(self, call_from_delete_vol=False):
+ """
+ Change volume's state (offline/online).
+ """
+ if self.parameters['is_online'] and not call_from_delete_vol: # Desired state is online, set up the ZAPI calls accordingly
+ vol_state_zapi, vol_name_zapi, action = ['volume-online-async', 'volume-name', 'online']\
+ if (self.parameters['is_infinite'] or self.volume_style == 'flexGroup')\
+ else ['volume-online', 'name', 'online']
+ else: # Desired state is offline, set up the ZAPI calls accordingly
+ vol_state_zapi, vol_name_zapi, action = ['volume-offline-async', 'volume-name', 'offline']\
+ if (self.parameters['is_infinite'] or self.volume_style == 'flexGroup')\
+ else ['volume-offline', 'name', 'offline']
+ volume_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-unmount', **{'volume-name': self.parameters['name']})
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ vol_state_zapi, **{vol_name_zapi: self.parameters['name']})
+ try:
+ if not self.parameters['is_online'] or call_from_delete_vol: # Unmount before offline
+ self.server.invoke_successfully(volume_unmount, enable_tunneling=True)
+ result = self.server.invoke_successfully(volume_change_state, enable_tunneling=True)
+ if self.volume_style == 'flexGroup' or self.parameters['is_infinite']:
+ self.check_invoke_result(result, action)
+ self.ems_log_event("change-state")
+ except netapp_utils.zapi.NaApiError as error:
+ state = "online" if self.parameters['is_online'] else "offline"
+ self.module.fail_json(msg='Error changing the state of volume %s to %s: %s'
+ % (self.parameters['name'], state, to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_volume_attribute(self, zapi_object, parent_attribute, attribute, value):
+ """
+ Attach a new child element (attribute, value) to the ZAPI request being built.
+ :param zapi_object: NaElement that receives the new wrapper element (when parent_attribute is a str),
+ or that receives the child directly and is itself attached to parent_attribute (when it is an NaElement).
+ :param parent_attribute: name of a new wrapper element (str), or an existing NaElement.
+ :param attribute: name of the child element to add.
+ :param value: value of the child element.
+ """
+ if isinstance(parent_attribute, str):
+ vol_attribute = netapp_utils.zapi.NaElement(parent_attribute)
+ vol_attribute.add_new_child(attribute, value)
+ zapi_object.add_child_elem(vol_attribute)
+ else:
+ zapi_object.add_new_child(attribute, value)
+ parent_attribute.add_child_elem(zapi_object)
+
+ def volume_modify_attributes(self, params):
+ """
+ Modify volume parameters such as 'export_policy', 'unix_permissions', 'snapshot_policy', 'space_guarantee',
+ 'percent_snapshot_space', 'qos_policy_group' and 'qos_adaptive_policy_group'.
+ """
+ if self.volume_style == 'flexGroup' or self.parameters['is_infinite']:
+ vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter-async')
+ else:
+ vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter')
+ attributes = netapp_utils.zapi.NaElement('attributes')
+ vol_mod_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ # volume-attributes is split into 25 sub-categories
+ # volume-space-attributes
+ vol_space_attributes = netapp_utils.zapi.NaElement('volume-space-attributes')
+ if self.parameters.get('space_guarantee') is not None:
+ self.create_volume_attribute(vol_space_attributes, vol_mod_attributes,
+ 'space-guarantee', self.parameters['space_guarantee'])
+ if self.parameters.get('percent_snapshot_space') is not None:
+ self.create_volume_attribute(vol_space_attributes, vol_mod_attributes,
+ 'percentage-snapshot-reserve', str(self.parameters['percent_snapshot_space']))
+ if self.parameters.get('space_slo') is not None:
+ self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-slo', self.parameters['space_slo'])
+ # volume-snapshot-attributes
+ vol_snapshot_attributes = netapp_utils.zapi.NaElement('volume-snapshot-attributes')
+ if self.parameters.get('snapshot_policy') is not None:
+ self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes,
+ 'snapshot-policy', self.parameters['snapshot_policy'])
+ if self.parameters.get('snapdir_access') is not None:
+ self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes,
+ 'snapdir-access-enabled',
+ self.na_helper.get_value_for_bool(False, self.parameters['snapdir_access'], 'snapdir_access'))
+ # volume-export-attributes
+ if self.parameters.get('export_policy') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-export-attributes',
+ 'policy', self.parameters['export_policy'])
+ # volume-security-attributes
+ if self.parameters.get('unix_permissions') is not None or self.parameters.get('group_id') is not None or self.parameters.get('user_id') is not None:
+ vol_security_attributes = netapp_utils.zapi.NaElement('volume-security-attributes')
+ vol_security_unix_attributes = netapp_utils.zapi.NaElement('volume-security-unix-attributes')
+ if self.parameters.get('unix_permissions') is not None:
+ self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
+ 'permissions', self.parameters['unix_permissions'])
+ if self.parameters.get('group_id') is not None:
+ self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
+ 'group-id', str(self.parameters['group_id']))
+ if self.parameters.get('user_id') is not None:
+ self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes,
+ 'user-id', str(self.parameters['user_id']))
+ vol_mod_attributes.add_child_elem(vol_security_attributes)
+ if params and params.get('volume_security_style') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-security-attributes',
+ 'style', self.parameters['volume_security_style'])
+
+ # volume-performance-attributes
+ if self.parameters.get('atime_update') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-performance-attributes',
+ 'is-atime-update-enabled', self.na_helper.get_value_for_bool(False, self.parameters['atime_update'], 'atime_update'))
+ # volume-qos-attributes
+ if self.parameters.get('qos_policy_group') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-qos-attributes',
+ 'policy-group-name', self.parameters['qos_policy_group'])
+ if self.parameters.get('qos_adaptive_policy_group') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-qos-attributes',
+ 'adaptive-policy-group-name', self.parameters['qos_adaptive_policy_group'])
+ # volume-comp-aggr-attributes
+ if params and params.get('tiering_policy') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-comp-aggr-attributes',
+ 'tiering-policy', self.parameters['tiering_policy'])
+ # volume-state-attributes
+ if self.parameters.get('nvfail_enabled') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-state-attributes', 'is-nvfail-enabled', str(self.parameters['nvfail_enabled']))
+ # volume-dr-protection-attributes
+ if self.parameters.get('vserver_dr_protection') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-vserver-dr-protection-attributes',
+ 'vserver-dr-protection', self.parameters['vserver_dr_protection'])
+ # volume-id-attributes
+ if self.parameters.get('comment') is not None:
+ self.create_volume_attribute(vol_mod_attributes, 'volume-id-attributes',
+ 'comment', self.parameters['comment'])
+ # End of Volume-attributes sub attributes
+ attributes.add_child_elem(vol_mod_attributes)
+ query = netapp_utils.zapi.NaElement('query')
+ vol_query_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ self.create_volume_attribute(vol_query_attributes, 'volume-id-attributes',
+ 'name', self.parameters['name'])
+ query.add_child_elem(vol_query_attributes)
+ vol_mod_iter.add_child_elem(attributes)
+ vol_mod_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(vol_mod_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ error_msg = to_native(error)
+ if 'volume-comp-aggr-attributes' in error_msg:
+ error_msg += ". Added info: tiering option requires ONTAP 9.4 or later."
+ self.module.fail_json(msg='Error modifying volume %s: %s'
+ % (self.parameters['name'], error_msg),
+ exception=traceback.format_exc())
+
+ self.ems_log_event("volume-modify")
+ failures = result.get_child_by_name('failure-list')
+ # handle error if modify space, policy, or unix-permissions parameter fails
+ if failures is not None:
+ error_msgs = list()
+ for return_info in ('volume-modify-iter-info', 'volume-modify-iter-async-info'):
+ if failures.get_child_by_name(return_info) is not None:
+ error_msgs.append(failures.get_child_by_name(return_info).get_child_content('error-message'))
+ if error_msgs and any([x is not None for x in error_msgs]):
+ self.module.fail_json(msg="Error modifying volume %s: %s"
+ % (self.parameters['name'], ' --- '.join(error_msgs)),
+ exception=traceback.format_exc())
+ if self.volume_style == 'flexGroup' or self.parameters['is_infinite']:
+ success = result.get_child_by_name('success-list')
+ success = success.get_child_by_name('volume-modify-iter-async-info')
+ results = dict()
+ for key in ('status', 'jobid'):
+ if success and success.get_child_by_name(key):
+ results[key] = success[key]
+ status = results.get('status')
+ if status == 'in_progress' and 'jobid' in results:
+ if self.parameters['time_out'] == 0:
+ return
+ error = self.check_job_status(results['jobid'])
+ if error is None:
+ return
+ self.module.fail_json(msg='Error when modifying volume: %s' % error)
+ self.module.fail_json(msg='Unexpected error when modifying volume: result is: %s' % str(result.to_string()))
+
+ def volume_mount(self):
+ """
+ Mount an existing volume in specified junction_path
+ :return: None
+ """
+ vol_mount = netapp_utils.zapi.NaElement('volume-mount')
+ vol_mount.add_new_child('volume-name', self.parameters['name'])
+ vol_mount.add_new_child('junction-path', self.parameters['junction_path'])
+ try:
+ self.server.invoke_successfully(vol_mount, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error mounting volume %s on path %s: %s'
+ % (self.parameters['name'], self.parameters['junction_path'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def volume_unmount(self):
+ """
+ Unmount an existing volume
+ :return: None
+ """
+ vol_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-unmount', **{'volume-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(vol_unmount, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error unmounting volume %s: %s'
+ % (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+ def modify_volume(self, modify):
+ '''Modify volume action'''
+ attributes = modify.keys()
+ # order matters here, if both is_online and mount in modify, must bring the volume online first.
+ if 'is_online' in attributes:
+ self.change_volume_state()
+ for attribute in attributes:
+ if attribute in ['space_guarantee', 'export_policy', 'unix_permissions', 'group_id', 'user_id', 'tiering_policy',
+ 'snapshot_policy', 'percent_snapshot_space', 'snapdir_access', 'atime_update', 'volume_security_style',
+ 'nvfail_enabled', 'space_slo', 'qos_policy_group', 'qos_adaptive_policy_group', 'vserver_dr_protection', 'comment']:
+ self.volume_modify_attributes(modify)
+ break
+ if 'snapshot_auto_delete' in attributes:
+ self.set_snapshot_auto_delete()
+ if 'junction_path' in attributes:
+ if modify.get('junction_path') == '':
+ self.volume_unmount()
+ else:
+ self.volume_mount()
+ if 'size' in attributes:
+ self.resize_volume()
+ if 'aggregate_name' in attributes:
+ # keep it last, as it may take some time
+ self.move_volume()
+ if self.parameters.get('wait_for_completion'):
+ self.wait_for_volume_move()
+
+ def compare_chmod_value(self, current):
+ """
+ Compare the current unix_permissions to the desired unix_permissions.
+ :return: True if they match, False if they differ or if the desired unix_permissions value is not valid.
+ """
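+ # Illustrative example: a desired value of '---rwxr-xr-x' converts to octal 755 and therefore
+ # matches a current (numeric) value of 755.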
+ desire = self.parameters
+ if current is None:
+ return False
+ octal_value = ''
+ unix_permissions = desire['unix_permissions']
+ if unix_permissions.isdigit():
+ return int(current['unix_permissions']) == int(unix_permissions)
+ else:
+ if len(unix_permissions) != 12:
+ return False
+ if unix_permissions[:3] != '---':
+ return False
+ for i in range(3, len(unix_permissions), 3):
+ if unix_permissions[i] not in ['r', '-'] or unix_permissions[i + 1] not in ['w', '-']\
+ or unix_permissions[i + 2] not in ['x', '-']:
+ return False
+ group_permission = self.char_to_octal(unix_permissions[i:i + 3])
+ octal_value += str(group_permission)
+ return int(current['unix_permissions']) == int(octal_value)
+
+ def char_to_octal(self, chars):
+ """
+ :param chars: Characters to be converted into octal values.
+ :return: octal value of the individual group permission.
+ """
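+ # For example, 'rwx' -> 7, 'r-x' -> 5, '-w-' -> 2, '---' -> 0.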
+ total = 0
+ if chars[0] == 'r':
+ total += 4
+ if chars[1] == 'w':
+ total += 2
+ if chars[2] == 'x':
+ total += 1
+ return total
+
+ def get_volume_style(self, current):
+ '''Get volume style, infinite or standard flexvol'''
+ if current is None:
+ if self.parameters.get('aggr_list') or self.parameters.get('aggr_list_multiplier') or self.parameters.get('auto_provision_as'):
+ return 'flexGroup'
+ else:
+ if current.get('style_extended'):
+ if current['style_extended'] == 'flexgroup':
+ return 'flexGroup'
+ else:
+ return current['style_extended']
+ return None
+
+ def get_job(self, jobid, server):
+ """
+ Get job details by id
+ """
+ job_get = netapp_utils.zapi.NaElement('job-get')
+ job_get.add_new_child('job-id', jobid)
+ try:
+ result = server.invoke_successfully(job_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "15661":
+ # Not found
+ return None
+ self.module.fail_json(msg='Error fetching job info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
+ results = {
+ 'job-progress': job_info['job-progress'],
+ 'job-state': job_info['job-state']
+ }
+ if job_info.get_child_by_name('job-completion') is not None:
+ results['job-completion'] = job_info['job-completion']
+ else:
+ results['job-completion'] = None
+ return results
+
+ def check_job_status(self, jobid):
+ """
+ Loop until job is complete
+ """
+ server = self.server
+ sleep_time = 5
+ time_out = self.parameters['time_out']
+ results = self.get_job(jobid, server)
+ error = 'timeout'
+
+ while time_out > 0:
+ results = self.get_job(jobid, server)
+ # If running as cluster admin, the job is owned by cluster vserver
+ # rather than the target vserver.
+ if results is None and server == self.server:
+ results = netapp_utils.get_cserver(self.server)
+ server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ continue
+ if results is None:
+ error = 'cannot locate job with id: %d' % int(jobid)
+ break
+ if results['job-state'] in ('queued', 'running'):
+ time.sleep(sleep_time)
+ time_out -= sleep_time
+ continue
+ if results['job-state'] in ('success', 'failure'):
+ break
+ else:
+ self.module.fail_json(msg='Unexpected job status in: %s' % repr(results))
+
+ if results is not None:
+ if results['job-state'] == 'success':
+ error = None
+ elif results['job-state'] in ('queued', 'running'):
+ error = 'job completion exceeded expected timer of: %s seconds' % \
+ self.parameters['time_out']
+ else:
+ if results['job-completion'] is not None:
+ error = results['job-completion']
+ else:
+ error = results['job-progress']
+ return error
+
+ def check_invoke_result(self, result, action):
+ '''
+ Check the result of an asynchronous ZAPI call and, if a job was started, wait for it to complete.
+ '''
+ results = dict()
+ for key in ('result-status', 'result-jobid'):
+ if result.get_child_by_name(key):
+ results[key] = result[key]
+ status = results.get('result-status')
+ if status == 'in_progress' and 'result-jobid' in results:
+ if self.parameters['time_out'] == 0:
+ return
+ error = self.check_job_status(results['result-jobid'])
+ if error is None:
+ return
+ else:
+ self.module.fail_json(msg='Error when %s volume: %s' % (action, error))
+ if status == 'failed':
+ self.module.fail_json(msg='Operation failed when %s volume.' % action)
+
+ def set_efficiency_attributes(self, options):
+ for key, attr in self.sis_keys2zapi_set.items():
+ value = self.parameters.get(key)
+ if value is not None:
+ if self.argument_spec[key]['type'] == 'bool':
+ value = self.na_helper.get_value_for_bool(False, value)
+ options[attr] = value
+ # ZAPI requires compression to be set for inline-compression
+ if options.get('enable-inline-compression') == 'true' and 'enable-compression' not in options:
+ options['enable-compression'] = 'true'
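+ # e.g. with inline_compression=true and compression left unset, the options sent to ZAPI end up
+ # containing both 'enable-inline-compression': 'true' and 'enable-compression': 'true'.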
+
+ def set_efficiency_config(self):
+ '''Set efficiency policy and compression attributes'''
+ options = {'path': '/vol/' + self.parameters['name']}
+ efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable', **options)
+ try:
+ self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 40043 denotes that the operation has already been enabled.
+ if to_native(error.code) == "40043":
+ pass
+ else:
+ self.module.fail_json(msg='Error enabling efficiency on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ self.set_efficiency_attributes(options)
+ efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config', **options)
+ try:
+ self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def set_efficiency_config_async(self):
+ """Set efficiency policy and compression attributes in asynchronous mode"""
+ options = {'volume-name': self.parameters['name']}
+ efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable-async', **options)
+ try:
+ result = self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error enabling efficiency on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ self.check_invoke_result(result, 'enable efficiency on')
+
+ self.set_efficiency_attributes(options)
+ efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config-async', **options)
+ try:
+ result = self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ self.check_invoke_result(result, 'set efficiency policy on')
+
+ def get_efficiency_info(self, return_value):
+ """
+ get the name of the efficiency policy assigned to volume, as well as compression values
+ if attribute does not exist, set its value to None
+ :return: update return_value dict.
+ """
+ sis_info = netapp_utils.zapi.NaElement('sis-get-iter')
+ sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
+ sis_status_info.add_new_child('path', '/vol/' + self.parameters['name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(sis_status_info)
+ sis_info.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(sis_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching efficiency policy for volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ for key in self.sis_keys2zapi_get:
+ return_value[key] = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ sis_attributes = result.get_child_by_name('attributes-list').get_child_by_name('sis-status-info')
+ for key, attr in self.sis_keys2zapi_get.items():
+ value = sis_attributes.get_child_content(attr)
+ if self.argument_spec[key]['type'] == 'bool':
+ value = self.na_helper.get_value_for_bool(True, value)
+ return_value[key] = value
+
+ def modify_volume_efficiency_config(self, efficiency_config_modify_value):
+ if efficiency_config_modify_value == 'async':
+ self.set_efficiency_config_async()
+ else:
+ self.set_efficiency_config()
+
+ def set_snapshot_auto_delete(self):
+ options = {'volume': self.parameters['name']}
+ desired_options = self.parameters['snapshot_auto_delete']
+ for key, value in desired_options.items():
+ options['option-name'] = key
+ options['option-value'] = str(value)
+ snapshot_auto_delete = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-autodelete-set-option', **options)
+ try:
+ self.server.invoke_successfully(snapshot_auto_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting snapshot auto delete options for volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rehost_volume(self):
+ volume_rehost = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rehost', **{'vserver': self.parameters['from_vserver'],
+ 'destination-vserver': self.parameters['vserver'],
+ 'volume': self.parameters['name']})
+ if self.parameters.get('auto_remap_luns') is not None:
+ volume_rehost.add_new_child('auto-remap-luns', str(self.parameters['auto_remap_luns']))
+ if self.parameters.get('force_unmap_luns') is not None:
+ volume_rehost.add_new_child('force-unmap-luns', str(self.parameters['force_unmap_luns']))
+ try:
+ self.cluster.invoke_successfully(volume_rehost, enable_tunneling=True)
+ self.ems_log_event("volume-rehost")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error rehosting volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapshot_restore_volume(self):
+ snapshot_restore = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapshot-restore-volume', **{'snapshot': self.parameters['snapshot_restore'],
+ 'volume': self.parameters['name']})
+ if self.parameters.get('force_restore') is not None:
+ snapshot_restore.add_new_child('force', str(self.parameters['force_restore']))
+ if self.parameters.get('preserve_lun_ids') is not None:
+ snapshot_restore.add_new_child('preserve-lun-ids', str(self.parameters['preserve_lun_ids']))
+ try:
+ self.server.invoke_successfully(snapshot_restore, enable_tunneling=True)
+ self.ems_log_event("snapshot-restore-volume")
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error restoring volume %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def adjust_size(self, current, after_create):
+ """
+ Ignore a small change in size by resetting the desired size to the current size.
+ """
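+ # Illustrative example: with the default size_change_threshold of 10, a current size of 100gb and a
+ # requested size of 105gb differ by 5%, so the requested size is reset and no resize is attempted.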
+ if after_create:
+ # ignore change in size immediately after a create:
+ self.parameters['size'] = current['size']
+ elif self.parameters['size_change_threshold'] > 0:
+ if 'size' in current and self.parameters.get('size') is not None:
+ # ignore a less than XX% difference
+ if abs(current['size'] - self.parameters['size']) * 100 / current['size'] < self.parameters['size_change_threshold']:
+ self.parameters['size'] = current['size']
+
+ def set_modify_dict(self, current, after_create=False):
+ '''Fill modify dict with changes'''
+ # snapshot_auto_delete's value is a dict, get_modified_attributes function doesn't support dict as value.
+ auto_delete_info = current.pop('snapshot_auto_delete', None)
+ # ignore small changes in size by adjusting self.parameters['size']
+ self.adjust_size(current, after_create)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if modify is not None and 'type' in modify:
+ self.module.fail_json(msg="Changing the same volume from one type to another is not allowed.")
+ if self.parameters.get('snapshot_auto_delete') is not None:
+ auto_delete_modify = self.na_helper.get_modified_attributes(auto_delete_info,
+ self.parameters['snapshot_auto_delete'])
+ if len(auto_delete_modify) > 0:
+ modify['snapshot_auto_delete'] = auto_delete_modify
+ return modify
+
+ def take_modify_actions(self, modify):
+ if modify.get('is_online'):
+ # when moving to online, include parameters that get_volume() does not return while the volume is offline
+ for field in ['volume_security_style', 'group_id', 'user_id', 'percent_snapshot_space']:
+ if self.parameters.get(field) is not None:
+ modify[field] = self.parameters[field]
+ self.modify_volume(modify)
+
+ if any([modify.get(key) is not None for key in self.sis_keys2zapi_get]):
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexGroup':
+ efficiency_config_modify = 'async'
+ else:
+ efficiency_config_modify = 'sync'
+ self.modify_volume_efficiency_config(efficiency_config_modify)
+
+ def apply(self):
+ '''Call create/modify/delete operations'''
+ response = None
+ modify_after_create = None
+ current = self.get_volume()
+ self.volume_style = self.get_volume_style(current)
+ # rename and create are mutually exclusive
+ rename, rehost, snapshot_restore, cd_action, modify = None, None, None, None, None
+ if self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(self.get_volume(self.parameters['from_name']), current)
+ elif self.parameters.get('from_vserver'):
+ rehost = True
+ self.na_helper.changed = True
+ elif self.parameters.get('snapshot_restore'):
+ snapshot_restore = True
+ self.na_helper.changed = True
+ else:
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters.get('unix_permissions') is not None:
+ # current stores the numeric value of unix_permissions.
+ # unix_permissions in self.parameters can be either numeric or symbolic.
+ if self.compare_chmod_value(current) or not self.parameters['is_online']:
+ # don't change if the values are the same
+ # can't change permissions if not online
+ del self.parameters['unix_permissions']
+ if cd_action is None and rename is None and rehost is None and self.parameters['state'] == 'present':
+ modify = self.set_modify_dict(current)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if rename:
+ self.rename_volume()
+ if rehost:
+ self.rehost_volume()
+ if snapshot_restore:
+ self.snapshot_restore_volume()
+ if cd_action == 'create':
+ response = self.create_volume()
+ # if we create using ZAPI and modify only options are set (snapdir_access or atime_update), we need to run a modify.
+ # The modify also takes care of efficiency (sis) parameters and snapshot_auto_delete.
+ # If we create using REST application, some options are not available, we may need to run a modify.
+ current = self.get_volume()
+ if current:
+ modify_after_create = self.set_modify_dict(current, after_create=True)
+ if modify_after_create:
+ self.take_modify_actions(modify_after_create)
+ # restore this, as set_modify_dict could set it to False
+ self.na_helper.changed = True
+ elif cd_action == 'delete':
+ self.delete_volume(current)
+ elif modify:
+ self.parameters['uuid'] = current['uuid']
+ self.take_modify_actions(modify)
+
+ result = dict(
+ changed=self.na_helper.changed
+ )
+ if response is not None:
+ result['response'] = response
+ if modify:
+ result['modify'] = modify
+ if modify_after_create:
+ result['modify_after_create'] = modify_after_create
+ if self.warnings:
+ result['warnings'] = self.warnings
+ self.module.exit_json(**result)
+
+ def ems_log_event(self, state):
+ '''Autosupport log event'''
+ if state == 'create':
+ message = "A Volume has been created, size: " + \
+ str(self.parameters['size']) + str(self.parameters['size_unit'])
+ elif state == 'volume-delete':
+ message = "A Volume has been deleted"
+ elif state == 'volume-move':
+ message = "A Volume has been moved"
+ elif state == 'volume-rename':
+ message = "A Volume has been renamed"
+ elif state == 'volume-resize':
+ message = "A Volume has been resized to: " + \
+ str(self.parameters['size']) + str(self.parameters['size_unit'])
+ elif state == 'volume-rehost':
+ message = "A Volume has been rehosted"
+ elif state == 'snapshot-restore-volume':
+ message = "A Volume has been restored by snapshot"
+ elif state == 'volume-change':
+ message = "A Volume state has been changed"
+ else:
+ message = "na_ontap_volume has been called"
+ netapp_utils.ems_log_event(
+ "na_ontap_volume", self.server, event=message)
+
+
+def main():
+ '''Apply volume operations from playbook'''
+ obj = NetAppOntapVolume()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py
new file mode 100644
index 00000000..b3433133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_volume_autosize
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_volume_autosize
+short_description: NetApp ONTAP manage volume autosize
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify Volume AutoSize
+options:
+ volume:
+ description:
+ - The name of the flexible volume for which we want to set autosize.
+ type: str
+ required: true
+
+ mode:
+ description:
+ - Specify the flexible volume's autosize mode of operation.
+ type: str
+ choices: ['grow', 'grow_shrink', 'off']
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ grow_threshold_percent:
+ description:
+ - Specifies the percentage of the flexible volume's capacity at which autogrow is initiated.
+ - The default grow threshold varies from 85% to 98%, depending on the volume size.
+ - It is an error for the grow threshold to be less than or equal to the shrink threshold.
+ - Range between 0 and 100
+ type: int
+
+ increment_size:
+ description:
+ - Specify the flexible volume's increment size using the following format < number > [k|m|g|t]
+ - The amount is the absolute size to set.
+    - The trailing 'k', 'm', 'g', and 't' indicate the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ type: str
+
+ maximum_size:
+ description:
+ - Specify the flexible volume's maximum allowed size using the following format < number > [k|m|g|t]
+ - The amount is the absolute size to set.
+    - The trailing 'k', 'm', 'g', and 't' indicate the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ - The default value is 20% greater than the volume size at the time autosize was enabled.
+ - It is an error for the maximum volume size to be less than the current volume size.
+ - It is also an error for the maximum size to be less than or equal to the minimum size.
+ type: str
+
+ minimum_size:
+ description:
+    - Specify the flexible volume's minimum allowed size using the following format < number > [k|m|g|t]. The amount is the absolute size to set.
+    - The trailing 'k', 'm', 'g', and 't' indicate the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ - The default value is the size of the volume at the time the 'grow_shrink' mode was enabled.
+ - It is an error for the minimum size to be greater than or equal to the maximum size.
+ type: str
+
+ reset:
+ description:
+ - "Sets the values of maximum_size, increment_size, minimum_size, grow_threshold_percent, shrink_threshold_percent and mode to their defaults"
+    - If the reset parameter is present, the system will always perform the reset action, so idempotency is not supported.
+ type: bool
+
+ shrink_threshold_percent:
+ description:
+ - Specifies the percentage of the flexible volume's capacity at which autoshrink is initiated.
+    - The default shrink threshold is 50%. It is an error for the shrink threshold to be greater than or equal to the grow threshold.
+ - Range between 0 and 100
+ type: int
+'''
+
+EXAMPLES = """
+ - name: Modify volume autosize
+ na_ontap_volume_autosize:
+ hostname: 10.193.79.189
+ username: admin
+ password: netapp1!
+ volume: ansibleVolumesize12
+ mode: grow
+ grow_threshold_percent: 99
+ increment_size: 50m
+ maximum_size: 10g
+ minimum_size: 21m
+ shrink_threshold_percent: 40
+ vserver: ansible_vserver
+
+ - name: Reset volume autosize
+ na_ontap_volume_autosize:
+ hostname: 10.193.79.189
+ username: admin
+ password: netapp1!
+ volume: ansibleVolumesize12
+ reset: true
+ vserver: ansible_vserver
+"""
+
+RETURN = """
+"""
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVolumeAutosize(object):
+ ''' volume autosize configuration '''
+ def __init__(self):
+ self.use_rest = False
+        # volume-autosize returns sizes in KB, not bytes like volume, so the unit multipliers are shifted down by one
+ self._size_unit_map = dict(
+ k=1,
+ m=1024,
+ g=1024 ** 2,
+ t=1024 ** 3,
+ )
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ volume=dict(required=True, type="str"),
+ mode=dict(required=False, choices=['grow', 'grow_shrink', 'off']),
+ vserver=dict(required=True, type='str'),
+ grow_threshold_percent=dict(required=False, type='int'),
+ increment_size=dict(required=False, type='str'),
+ maximum_size=dict(required=False, type='str'),
+ minimum_size=dict(required=False, type='str'),
+ reset=dict(required=False, type='bool'),
+ shrink_threshold_percent=dict(required=False, type='int')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['reset', 'maximum_size'],
+ ['reset', 'increment_size'],
+ ['reset', 'minimum_size'],
+ ['reset', 'grow_threshold_percent'],
+ ['reset', 'shrink_threshold_percent'],
+ ['reset', 'mode']
+ ]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+        # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower versions
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ # increment size and reset are not supported with rest api
+ if self.parameters.get('increment_size'):
+ self.module.fail_json(msg="Rest API does not support increment size, please switch to ZAPI")
+ if self.parameters.get('reset'):
+ self.module.fail_json(msg="Rest API does not support reset, please switch to ZAPI")
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_volume_autosize(self, uuid=None):
+ """
+ Get volume_autosize information from the ONTAP system
+ :return:
+ """
+ if self.use_rest:
+ params = {'fields': 'autosize'}
+ api = 'storage/volumes/' + uuid
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ return self._create_get_volume_return(message['autosize'])
+ else:
+ volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-get')
+ volume_autosize_info.add_new_child('volume', self.parameters['volume'])
+ try:
+ result = self.server.invoke_successfully(volume_autosize_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error fetching volume autosize info for %s: %s' % (self.parameters['volume'],
+ to_native(error)),
+ exception=traceback.format_exc())
+ return self._create_get_volume_return(result)
+
+ def _create_get_volume_return(self, results):
+ """
+        Create a return value from the ZAPI volume-autosize-get results or the REST autosize info
+ :param results:
+ :return:
+ """
+ return_value = {}
+ if self.use_rest:
+ if 'mode' in results:
+ return_value['mode'] = results['mode']
+ if 'grow_threshold' in results:
+ return_value['grow_threshold_percent'] = results['grow_threshold']
+ if 'maximum' in results:
+ return_value['maximum_size'] = results['maximum']
+ if 'minimum' in results:
+ return_value['minimum_size'] = results['minimum']
+ if 'shrink_threshold' in results:
+ return_value['shrink_threshold_percent'] = results['shrink_threshold']
+ else:
+ if results.get_child_by_name('mode'):
+ return_value['mode'] = results.get_child_content('mode')
+ if results.get_child_by_name('grow-threshold-percent'):
+ return_value['grow_threshold_percent'] = int(results.get_child_content('grow-threshold-percent'))
+ if results.get_child_by_name('increment-size'):
+ return_value['increment_size'] = results.get_child_content('increment-size')
+ if results.get_child_by_name('maximum-size'):
+ return_value['maximum_size'] = results.get_child_content('maximum-size')
+ if results.get_child_by_name('minimum-size'):
+ return_value['minimum_size'] = results.get_child_content('minimum-size')
+ if results.get_child_by_name('shrink-threshold-percent'):
+ return_value['shrink_threshold_percent'] = int(results.get_child_content('shrink-threshold-percent'))
+ if return_value == {}:
+ return_value = None
+ return return_value
+
+ def modify_volume_autosize(self, uuid=None):
+ """
+        Modify a volume's autosize configuration
+ :return:
+ """
+ if self.use_rest:
+ params = {}
+ data = {}
+ autosize = {}
+ if self.parameters.get('mode'):
+ autosize['mode'] = self.parameters['mode']
+ if self.parameters.get('grow_threshold_percent'):
+ autosize['grow_threshold'] = self.parameters['grow_threshold_percent']
+ if self.parameters.get('maximum_size'):
+ autosize['maximum'] = self.parameters['maximum_size']
+ if self.parameters.get('minimum_size'):
+ autosize['minimum'] = self.parameters['minimum_size']
+ if self.parameters.get('shrink_threshold_percent'):
+ autosize['shrink_threshold'] = self.parameters['shrink_threshold_percent']
+ data['autosize'] = autosize
+ api = "storage/volumes/" + uuid
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+
+ else:
+ volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-set')
+ volume_autosize_info.add_new_child('volume', self.parameters['volume'])
+ if self.parameters.get('mode'):
+ volume_autosize_info.add_new_child('mode', self.parameters['mode'])
+ if self.parameters.get('grow_threshold_percent'):
+ volume_autosize_info.add_new_child('grow-threshold-percent', str(self.parameters['grow_threshold_percent']))
+ if self.parameters.get('increment_size'):
+ volume_autosize_info.add_new_child('increment-size', self.parameters['increment_size'])
+ if self.parameters.get('reset') is not None:
+ volume_autosize_info.add_new_child('reset', str(self.parameters['reset']))
+ if self.parameters.get('maximum_size'):
+ volume_autosize_info.add_new_child('maximum-size', self.parameters['maximum_size'])
+ if self.parameters.get('minimum_size'):
+ volume_autosize_info.add_new_child('minimum-size', self.parameters['minimum_size'])
+ if self.parameters.get('shrink_threshold_percent'):
+ volume_autosize_info.add_new_child('shrink-threshold-percent', str(self.parameters['shrink_threshold_percent']))
+ try:
+ self.server.invoke_successfully(volume_autosize_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg="Error modifying volume autosize for %s: %s" % (self.parameters["volume"], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_to_kb(self, converted_parameters):
+ """
+        Convert the size parameters to KB (ZAPI) or bytes (REST)
+        :param converted_parameters: dict of all parameters
+ :return:
+ """
+ for attr in ['maximum_size', 'minimum_size', 'increment_size']:
+ if converted_parameters.get(attr):
+ if self.use_rest:
+ converted_parameters[attr] = self.convert_to_byte(attr, converted_parameters)
+ else:
+ converted_parameters[attr] = str(self.convert_to_kb(attr, converted_parameters))
+ return converted_parameters
+
+ def convert_to_kb(self, variable, converted_parameters):
+ """
+        Convert a size string such as '10m' into its KB value
+        :param variable: the parameter we are going to convert
+        :param converted_parameters: dict of all parameters
+ :return:
+ """
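+        # Illustration (hypothetical input): '10m' maps to 10 * 1024 = 10240 KB here, while convert_to_byte
+        # below would return 10 * 1024 * 1024 = 10485760 bytes for the REST API.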
+ if converted_parameters.get(variable)[-1] not in ['k', 'm', 'g', 't']:
+ self.module.fail_json(msg="%s must end with a k, m, g or t" % variable)
+ return self._size_unit_map[converted_parameters.get(variable)[-1]] * int(converted_parameters.get(variable)[:-1])
+
+ def convert_to_byte(self, variable, converted_parameters):
+ if converted_parameters.get(variable)[-1] not in ['k', 'm', 'g', 't']:
+ self.module.fail_json(msg="%s must end with a k, m, g or t" % variable)
+ return (self._size_unit_map[converted_parameters.get(variable)[-1]] * int(converted_parameters.get(variable)[:-1])) * 1024
+
+ def get_volume_uuid(self):
+ """
+ Get a volume's UUID
+ :return: uuid of the volume
+ """
+ params = {'fields': '*',
+ 'name': self.parameters['volume'],
+ 'svm.name': self.parameters['vserver']}
+ api = "storage/volumes"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
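+        # note: this assumes the query matched exactly one volume; an empty 'records' list would raise an IndexError here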
+ return message['records'][0]['uuid']
+
+ def apply(self):
+ # TODO Logging for rest
+ uuid = None
+ if not self.use_rest:
+ netapp_utils.ems_log_event("na_ontap_volume_autosize", self.server)
+ if self.use_rest:
+            # we only have the volume name, we need to get the uuid for the volume
+ uuid = self.get_volume_uuid()
+ current = self.get_volume_autosize(uuid=uuid)
+ converted_parameters = copy.deepcopy(self.parameters)
+ converted_parameters = self.modify_to_kb(converted_parameters)
+ self.na_helper.get_modified_attributes(current, converted_parameters)
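+        # the return value is not needed here: get_modified_attributes is expected to flag self.na_helper.changed when it finds differences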
+ if self.parameters.get('reset') is True:
+ self.na_helper.changed = True
+ if self.na_helper.changed:
+ if not self.module.check_mode:
+ self.modify_volume_autosize(uuid=uuid)
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Apply volume autosize operations from playbook
+ :return:
+ """
+ obj = NetAppOntapVolumeAutosize()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py
new file mode 100644
index 00000000..f0a0ef43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_volume_clone
+short_description: NetApp ONTAP manage volume clones.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create NetApp ONTAP volume clones.
+- A FlexClone License is required to use this module
+options:
+ state:
+ description:
+ - Whether volume clone should be created.
+ choices: ['present']
+ type: str
+ default: 'present'
+ parent_volume:
+ description:
+ - The parent volume of the volume clone being created.
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the volume clone being created.
+ required: true
+ type: str
+ aliases:
+ - volume
+ vserver:
+ description:
+ - Vserver in which the volume clone should be created.
+ required: true
+ type: str
+ parent_snapshot:
+ description:
+    - Parent snapshot from which the volume clone is created.
+ type: str
+ parent_vserver:
+ description:
+    - Vserver of the parent volume from which the clone is created.
+ type: str
+ qos_policy_group_name:
+ description:
+ - The qos-policy-group-name which should be set for volume clone.
+ type: str
+ space_reserve:
+ description:
+ - The space_reserve setting which should be used for the volume clone.
+ choices: ['volume', 'none']
+ type: str
+ volume_type:
+ description:
+ - The volume-type setting which should be used for the volume clone.
+ choices: ['rw', 'dp']
+ type: str
+ junction_path:
+ version_added: 2.8.0
+ description:
+ - Junction path of the volume.
+ type: str
+ uid:
+ version_added: 2.9.0
+ description:
+ - The UNIX user ID for the clone volume.
+ type: int
+ gid:
+ version_added: 2.9.0
+ description:
+ - The UNIX group ID for the clone volume.
+ type: int
+ split:
+ version_added: '20.2.0'
+ description:
+ - Split clone volume from parent volume.
+ type: bool
+'''
+
+EXAMPLES = """
+ - name: create volume clone
+ na_ontap_volume_clone:
+ state: present
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"
+ vserver: vs_hack
+ parent_volume: normal_volume
+ name: clone_volume_7
+ space_reserve: none
+ parent_snapshot: backup1
+ junction_path: /clone_volume_7
+ uid: 1
+ gid: 1
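+
+  # A hypothetical follow-up task (illustrative values reused from above) showing the split option added in 20.2.0,
+  # which detaches the clone from its parent volume:
+  - name: split volume clone from parent
+    na_ontap_volume_clone:
+      state: present
+      username: "{{ netapp_username }}"
+      password: "{{ netapp_password }}"
+      hostname: "{{ netapp_hostname }}"
+      vserver: vs_hack
+      parent_volume: normal_volume
+      name: clone_volume_7
+      split: true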
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPVolumeClone(object):
+ """
+ Creates a volume clone
+ """
+
+ def __init__(self):
+ """
+ Initialize the NetAppOntapVolumeClone class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ parent_volume=dict(required=True, type='str'),
+ name=dict(required=True, type='str', aliases=["volume"]),
+ vserver=dict(required=True, type='str'),
+ parent_snapshot=dict(required=False, type='str', default=None),
+ parent_vserver=dict(required=False, type='str', default=None),
+ qos_policy_group_name=dict(required=False, type='str', default=None),
+ space_reserve=dict(required=False, type='str', choices=['volume', 'none'], default=None),
+ volume_type=dict(required=False, type='str', choices=['rw', 'dp']),
+ junction_path=dict(required=False, type='str', default=None),
+ uid=dict(required=False, type='int'),
+ gid=dict(required=False, type='int'),
+ split=dict(required=False, type='bool', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['uid', 'gid']
+ ]
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ if self.parameters.get('parent_vserver'):
+                # use cluster ZAPI, as vserver ZAPI does not support parent-vserver for create
+ self.create_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # keep vserver for ems log and clone-get
+ self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ else:
+ self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ self.create_server = self.vserver
+ return
+
+ def create_volume_clone(self):
+ """
+ Creates a new volume clone
+ """
+ clone_obj = netapp_utils.zapi.NaElement('volume-clone-create')
+ clone_obj.add_new_child("parent-volume", self.parameters['parent_volume'])
+ clone_obj.add_new_child("volume", self.parameters['name'])
+ if self.parameters.get('qos_policy_group_name'):
+ clone_obj.add_new_child("qos-policy-group-name", self.parameters['qos_policy_group_name'])
+ if self.parameters.get('space_reserve'):
+ clone_obj.add_new_child("space-reserve", self.parameters['space_reserve'])
+ if self.parameters.get('parent_snapshot'):
+ clone_obj.add_new_child("parent-snapshot", self.parameters['parent_snapshot'])
+ if self.parameters.get('parent_vserver'):
+ clone_obj.add_new_child("parent-vserver", self.parameters['parent_vserver'])
+ clone_obj.add_new_child("vserver", self.parameters['vserver'])
+ if self.parameters.get('volume_type'):
+ clone_obj.add_new_child("volume-type", self.parameters['volume_type'])
+ if self.parameters.get('junction_path'):
+ clone_obj.add_new_child("junction-path", self.parameters['junction_path'])
+ if self.parameters.get('uid'):
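+            # required_together in __init__ guarantees gid is present whenever uid is, so both children are added together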
+ clone_obj.add_new_child("uid", str(self.parameters['uid']))
+ clone_obj.add_new_child("gid", str(self.parameters['gid']))
+ try:
+ self.create_server.invoke_successfully(clone_obj, True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error creating volume clone: %s: %s' %
+ (self.parameters['name'], to_native(exc)), exception=traceback.format_exc())
+ if 'split' in self.parameters and self.parameters['split']:
+ self.start_volume_clone_split()
+
+ def modify_volume_clone(self):
+ """
+ Modify an existing volume clone
+ """
+ if 'split' in self.parameters and self.parameters['split']:
+ self.start_volume_clone_split()
+
+ def start_volume_clone_split(self):
+ """
+ Starts a volume clone split
+ """
+ clone_obj = netapp_utils.zapi.NaElement('volume-clone-split-start')
+ clone_obj.add_new_child("volume", self.parameters['name'])
+ try:
+ self.vserver.invoke_successfully(clone_obj, True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error starting volume clone split: %s: %s' %
+ (self.parameters['name'], to_native(exc)), exception=traceback.format_exc())
+
+ def get_volume_clone(self):
+ clone_obj = netapp_utils.zapi.NaElement('volume-clone-get')
+ clone_obj.add_new_child("volume", self.parameters['name'])
+ current = None
+ try:
+ results = self.vserver.invoke_successfully(clone_obj, True)
+ if results.get_child_by_name('attributes'):
+ attributes = results.get_child_by_name('attributes')
+ info = attributes.get_child_by_name('volume-clone-info')
+ current = {}
+ # Check if clone is currently splitting. Whilst a split is in
+ # progress, these attributes are present in 'volume-clone-info':
+ # block-percentage-complete, blocks-scanned & blocks-updated.
+ if info.get_child_by_name('block-percentage-complete') or \
+ info.get_child_by_name('blocks-scanned') or \
+ info.get_child_by_name('blocks-updated'):
+ current["split"] = True
+ else:
+ # Clone hasn't been split.
+ current["split"] = False
+ return current
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 15661 denotes a volume clone not being found.
+ if to_native(error.code) == "15661":
+ pass
+ else:
+ self.module.fail_json(msg='Error fetching volume clone information %s: %s' %
+ (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+ return None
+
+ def apply(self):
+ """
+ Run Module based on playbook
+ """
+ netapp_utils.ems_log_event("na_ontap_volume_clone", self.vserver)
+ current = self.get_volume_clone()
+ modify = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_volume_clone()
+ if modify:
+ self.modify_volume_clone()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Creates the NetApp Ontap Volume Clone object and runs the correct play task
+ """
+ obj = NetAppONTAPVolumeClone()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py
new file mode 100644
index 00000000..084c6fe4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_volume_snaplock
+
+short_description: NetApp ONTAP manage volume snaplock retention.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modifies the snaplock retention of volumes on NetApp ONTAP.
+options:
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ default_retention_period:
+ description:
+ - Specifies the default retention period that will be applied.
+ - The format is "<number> <units>" for example "10 days", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ - If this option is specified as "max", then maximum_retention_period will be used as the default retention period.
+ type: str
+
+ autocommit_period:
+ description:
+ - Specifies the autocommit-period for the snaplock volume.
+ - The format is "<number> <units>" for example "8 hours", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+ is_volume_append_mode_enabled:
+ description:
+ - Specifies if the volume append mode must be enabled or disabled.
+ - It can be modified only when the volume is not mounted and does not have any data or Snapshot copy.
+ - Volume append mode is not supported on SnapLock audit log volumes.
+ - When it is enabled, all files created with write permissions on the volume will be WORM appendable files by default.
+ - All WORM appendable files not modified for a period greater than the autocommit period of the volume are also committed to WORM read-only state.
+ type: bool
+
+ maximum_retention_period:
+ description:
+ - Specifies the allowed maximum retention period that will be applied.
+ - The format is "<number> <units>" for example "2 years", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+ minimum_retention_period:
+ description:
+ - Specifies the allowed minimum retention period that will be applied.
+ - The format is "<number> <units>" for example "1 days", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Set volume snaplock
+ na_ontap_volume_snaplock:
+ vserver: svm
+ name: ansibleVolume
+ default_retention_period: "5 days"
+ minimum_retention_period: "0 years"
+ maximum_retention_period: "10 days"
+ is_volume_append_mode_enabled: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVolumeSnaplock(object):
+ '''Class with volume operations'''
+
+ def __init__(self):
+ '''Initialize module parameters'''
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ default_retention_period=dict(required=False, type='str'),
+ maximum_retention_period=dict(required=False, type='str'),
+ minimum_retention_period=dict(required=False, type='str'),
+ autocommit_period=dict(required=False, type='str'),
+ is_volume_append_mode_enabled=dict(required=False, type='bool'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_volume_snaplock_attrs(self):
+ """
+        Return volume-get-snaplock-attrs query results for the volume named in self.parameters['name']
+ :return: dict of the volume snaplock attrs
+ """
+ volume_snaplock = netapp_utils.zapi.NaElement('volume-get-snaplock-attrs')
+ volume_snaplock.add_new_child('volume', self.parameters['name'])
+
+ try:
+ result = self.server.invoke_successfully(volume_snaplock, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snaplock attributes for volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return_value = None
+
+ if result.get_child_by_name('snaplock-attrs'):
+ volume_snaplock_attributes = result['snaplock-attrs']['snaplock-attrs-info']
+ return_value = {
+ 'autocommit_period': volume_snaplock_attributes['autocommit-period'],
+ 'default_retention_period': volume_snaplock_attributes['default-retention-period'],
+ 'is_volume_append_mode_enabled': self.na_helper.get_value_for_bool(True, volume_snaplock_attributes['is-volume-append-mode-enabled']),
+ 'maximum_retention_period': volume_snaplock_attributes['maximum-retention-period'],
+ 'minimum_retention_period': volume_snaplock_attributes['minimum-retention-period'],
+ }
+ return return_value
+
+ def set_volume_snaplock_attrs(self, modify):
+ '''Set ONTAP volume snaplock attributes'''
+ volume_snaplock_obj = netapp_utils.zapi.NaElement('volume-set-snaplock-attrs')
+ volume_snaplock_obj.add_new_child('volume', self.parameters['name'])
+ if modify.get('autocommit_period') is not None:
+ volume_snaplock_obj.add_new_child('autocommit-period', self.parameters['autocommit_period'])
+ if modify.get('default_retention_period') is not None:
+ volume_snaplock_obj.add_new_child('default-retention-period', self.parameters['default_retention_period'])
+ if modify.get('is_volume_append_mode_enabled') is not None:
+ volume_snaplock_obj.add_new_child('is-volume-append-mode-enabled',
+ self.na_helper.get_value_for_bool(False, self.parameters['is_volume_append_mode_enabled']))
+ if modify.get('maximum_retention_period') is not None:
+ volume_snaplock_obj.add_new_child('maximum-retention-period', self.parameters['maximum_retention_period'])
+ if modify.get('minimum_retention_period') is not None:
+ volume_snaplock_obj.add_new_child('minimum-retention-period', self.parameters['minimum_retention_period'])
+ try:
+ self.server.invoke_successfully(volume_snaplock_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting snaplock attributes for volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_volume_snaplock", self.server)
+ current, modify = self.get_volume_snaplock_attrs(), None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ self.set_volume_snaplock_attrs(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Set volume snaplock attributes from playbook'''
+ obj = NetAppOntapVolumeSnaplock()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py
new file mode 100644
index 00000000..4ac35fce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_vscan
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan
+short_description: NetApp ONTAP Vscan enable/disable.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+notes:
+- on demand task, on_access_policy and scanner_pools must be set up before running this module
+description:
+- Enable and Disable Vscan
+options:
+ enable:
+ description:
+    - Whether to enable or disable Vscan
+ type: bool
+ default: True
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Enable Vscan
+ na_ontap_vscan:
+ enable: True
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: trident_svm
+
+ - name: Disable Vscan
+ na_ontap_vscan:
+ enable: False
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: trident_svm
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscan(object):
+ ''' enable/disable vscan '''
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ enable=dict(type='bool', default=True),
+ vserver=dict(required=True, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower versions
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_vscan(self):
+ if self.use_rest:
+ params = {'fields': 'svm,enabled',
+ "svm.name": self.parameters['vserver']}
+ api = "protocols/vscan"
+ message, error = self.rest_api.get(api, params)
+ if error:
+ self.module.fail_json(msg=error)
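+            # each record in the REST response is expected to carry 'enabled' and 'svm' (including its 'uuid'), which apply() relies on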
+ return message['records'][0]
+ else:
+ vscan_status_iter = netapp_utils.zapi.NaElement('vscan-status-get-iter')
+ vscan_status_info = netapp_utils.zapi.NaElement('vscan-status-info')
+ vscan_status_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(vscan_status_info)
+ vscan_status_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(vscan_status_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting Vscan info for Vserver %s: %s' %
+ (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return result.get_child_by_name('attributes-list').get_child_by_name('vscan-status-info')
+
+ def enable_vscan(self, uuid=None):
+ if self.use_rest:
+ params = {"svm.name": self.parameters['vserver']}
+ data = {"enabled": self.parameters['enable']}
+ api = "protocols/vscan/" + uuid
+ dummy, error = self.rest_api.patch(api, data, params)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ else:
+ vscan_status_obj = netapp_utils.zapi.NaElement("vscan-status-modify")
+ vscan_status_obj.add_new_child('is-vscan-enabled', str(self.parameters['enable']))
+ try:
+ self.server.invoke_successfully(vscan_status_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg="Error enabling/disabling Vscan: %s" % to_native(error), exception=traceback.format_exc())
+
+ def asup_log(self):
+ if self.use_rest:
+ # TODO: logging for Rest
+ return
+ else:
+ # Either we are using ZAPI, or REST failed when it should not
+ try:
+ netapp_utils.ems_log_event("na_ontap_vscan", self.server)
+ except Exception:
+ # TODO: we may fail to connect to REST or ZAPI, the line below shows REST issues only
+ # self.module.fail_json(msg=repr(self.rest_api.errors), log=repr(self.rest_api.debug_logs))
+ pass
+
+ def apply(self):
+ changed = False
+ self.asup_log()
+ current = self.get_vscan()
+ if self.use_rest:
+ if current['enabled'] != self.parameters['enable']:
+ if not self.module.check_mode:
+ self.enable_vscan(current['svm']['uuid'])
+ changed = True
+ else:
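+            # ZAPI returns 'true'/'false' as strings, hence the lowercased str() comparison below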
+ if current.get_child_content('is-vscan-enabled') != str(self.parameters['enable']).lower():
+ if not self.module.check_mode:
+ self.enable_vscan()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscan()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py
new file mode 100644
index 00000000..b523ae8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_on_access_policy
+short_description: NetApp ONTAP Vscan on access policy configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure on access policy for Vscan (virus scan)
+options:
+ state:
+ description:
+ - Whether a Vscan on Access policy is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ policy_name:
+ description:
+ - The name of the policy
+ required: true
+ type: str
+
+ file_ext_to_exclude:
+ description:
+ - File extensions for which On-Access scanning must not be performed.
+ type: list
+ elements: str
+
+ file_ext_to_include:
+ description:
+ - File extensions for which On-Access scanning is considered. The default value is '*', which means that all files are considered for scanning except
+ - those which are excluded from scanning.
+ type: list
+ elements: str
+
+ filters:
+ description:
+ - A list of filters which can be used to define the scope of the On-Access policy more precisely. The filters can be added in any order. Possible values
+ - scan_ro_volume Enable scans for read-only volume,
+ - scan_execute_access Scan only files opened with execute-access (CIFS only)
+ type: list
+ elements: str
+
+ is_scan_mandatory:
+ description:
+ - Specifies whether access to a file is allowed if there are no external virus-scanning servers available for virus scanning. It is true if not provided at
+ the time of creating a policy.
+ type: bool
+
+ max_file_size:
+ description:
+ - Max file-size (in bytes) allowed for scanning. The default value of 2147483648 (2GB) is taken if not provided at the time of creating a policy.
+ type: int
+
+ paths_to_exclude:
+ description:
+ - File paths for which On-Access scanning must not be performed.
+ type: list
+ elements: str
+
+ scan_files_with_no_ext:
+ description:
+ - Specifies whether files without any extension are considered for scanning or not.
+ default: true
+ type: bool
+
+ policy_status:
+ description:
+ - Status for the created policy
+ default: false
+ type: bool
+ version_added: 20.8.0
+'''
+
+EXAMPLES = """
+ - name: Create Vscan On Access Policy
+ na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml']
+ - name: Create Vscan On Access Policy with Policy Status enabled
+ na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml']
+ policy_status: True
+ - name: modify Vscan on Access Policy
+ na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml', 'py']
+ - name: Delete On Access Policy
+ na_ontap_vscan_on_access_policy:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanOnAccessPolicy(object):
+ """
+ Create/Modify/Delete a Vscan OnAccess policy
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ file_ext_to_exclude=dict(required=False, type='list', elements='str'),
+ file_ext_to_include=dict(required=False, type='list', elements='str'),
+ filters=dict(required=False, type='list', elements='str'),
+ is_scan_mandatory=dict(required=False, type='bool', default=False),
+ max_file_size=dict(required=False, type="int"),
+ paths_to_exclude=dict(required=False, type='list', elements='str'),
+ scan_files_with_no_ext=dict(required=False, type='bool', default=True),
+ policy_status=dict(required=False, type='bool')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ parameters = self.module.params
+ self.state = parameters['state']
+ self.vserver = parameters['vserver']
+ self.policy_name = parameters['policy_name']
+ self.file_ext_to_exclude = parameters['file_ext_to_exclude']
+ self.file_ext_to_include = parameters['file_ext_to_include']
+ self.filters = parameters['filters']
+ self.is_scan_mandatory = parameters['is_scan_mandatory']
+ self.max_file_size = parameters['max_file_size']
+ self.paths_to_exclude = parameters['paths_to_exclude']
+ self.scan_files_with_no_ext = parameters['scan_files_with_no_ext']
+ self.policy_status = parameters['policy_status']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def exists_access_policy(self, policy_obj=None):
+ """
+ Check if a Vscan Access policy exists
+ :return: True if Exist, False if it does not
+        :return: True if it exists, False if it does not
+ if policy_obj is None:
+ policy_obj = self.return_on_access_policy()
+ if policy_obj:
+ return True
+ else:
+ return False
+
+ def return_on_access_policy(self):
+ """
+ Return a Vscan on Access Policy
+        :return: None if there is no access policy, the policy otherwise
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-get-iter')
+ access_policy_info = netapp_utils.zapi.NaElement('vscan-on-access-policy-info')
+ access_policy_info.add_new_child('policy-name', self.policy_name)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(access_policy_info)
+ access_policy_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records'):
+ if int(result.get_child_content('num-records')) == 1:
+ return result
+ elif int(result.get_child_content('num-records')) > 1:
+                self.module.fail_json(msg='Multiple Vscan on Access Policies matching %s' % self.policy_name)
+ return None
+
+ def create_on_access_policy(self):
+ """
+ Create a Vscan on Access policy
+ :return: none
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-create')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ access_policy_obj.add_new_child('protocol', 'cifs')
+ access_policy_obj = self._fill_in_access_policy(access_policy_obj)
+
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def status_modify_on_access_policy(self):
+ """
+ Update the status of policy
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-status-modify')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ access_policy_obj.add_new_child('policy-status', str(self.policy_status).lower())
+
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying status Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def delete_on_access_policy(self):
+ """
+ Delete a Vscan On Access Policy
+ :return:
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-delete')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Deleting Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def modify_on_access_policy(self):
+ """
+ Modify a Vscan On Access policy
+ :return: nothing
+ """
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-modify')
+ access_policy_obj.add_new_child('policy-name', self.policy_name)
+ access_policy_obj = self._fill_in_access_policy(access_policy_obj)
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Modifying Vscan on Access Policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+
+ def _fill_in_access_policy(self, access_policy_obj):
+ if self.is_scan_mandatory is not None:
+ access_policy_obj.add_new_child('is-scan-mandatory', str(self.is_scan_mandatory).lower())
+ if self.max_file_size:
+ access_policy_obj.add_new_child('max-file-size', str(self.max_file_size))
+ if self.scan_files_with_no_ext is not None:
+ access_policy_obj.add_new_child('scan-files-with-no-ext', str(self.scan_files_with_no_ext))
+ if self.file_ext_to_exclude:
+ ext_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude')
+ access_policy_obj.add_child_elem(ext_obj)
+ for extension in self.file_ext_to_exclude:
+ ext_obj.add_new_child('file-extension', extension)
+ if self.file_ext_to_include:
+ ext_obj = netapp_utils.zapi.NaElement('file-ext-to-include')
+ access_policy_obj.add_child_elem(ext_obj)
+ for extension in self.file_ext_to_include:
+ ext_obj.add_new_child('file-extension', extension)
+ if self.filters:
+ ui_filter_obj = netapp_utils.zapi.NaElement('filters')
+ access_policy_obj.add_child_elem(ui_filter_obj)
+            for policy_filter in self.filters:
+                ui_filter_obj.add_new_child('vscan-on-access-policy-ui-filter', policy_filter)
+ if self.paths_to_exclude:
+ path_obj = netapp_utils.zapi.NaElement('paths-to-exclude')
+ access_policy_obj.add_child_elem(path_obj)
+ for path in self.paths_to_exclude:
+ path_obj.add_new_child('file-path', path)
+ return access_policy_obj
+
+ def has_policy_changed(self):
+ results = self.return_on_access_policy()
+ if results is None:
+ return False
+ try:
+ policy_obj = results.get_child_by_name('attributes-list').get_child_by_name('vscan-on-access-policy-info')
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Accessing on access policy %s: %s' %
+ (self.policy_name, to_native(error)), exception=traceback.format_exc())
+ if self.is_scan_mandatory is not None:
+ if str(self.is_scan_mandatory).lower() != policy_obj.get_child_content('is-scan-mandatory'):
+ return True
+ if self.policy_status is not None:
+ if str(self.policy_status).lower() != policy_obj.get_child_content('is-policy-enabled'):
+ return True
+ if self.max_file_size:
+ if self.max_file_size != int(policy_obj.get_child_content('max-file-size')):
+ return True
+ if self.scan_files_with_no_ext is not None:
+ if str(self.scan_files_with_no_ext).lower() != policy_obj.get_child_content('scan-files-with-no-ext'):
+ return True
+ if self.file_ext_to_exclude:
+ # if no file-ext-to-exclude are given at creation, XML will not have a file-ext-to-exclude
+ if policy_obj.get_child_by_name('file-ext-to-exclude') is None:
+ return True
+ current_to_exclude = []
+ for each in policy_obj.get_child_by_name('file-ext-to-exclude').get_children():
+ current_to_exclude.append(each.get_content())
+ k = self._diff(self.file_ext_to_exclude, current_to_exclude)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ if self.file_ext_to_include:
+ # if no file-ext-to-include are given at creation, XML will not have a file-ext-to-include
+ if policy_obj.get_child_by_name('file-ext-to-include') is None:
+ return True
+ current_to_include = []
+ for each in policy_obj.get_child_by_name('file-ext-to-include').get_children():
+ current_to_include.append(each.get_content())
+ k = self._diff(self.file_ext_to_include, current_to_include)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ if self.filters:
+ if policy_obj.get_child_by_name('filters') is None:
+ return True
+ current_filters = []
+ for each in policy_obj.get_child_by_name('filters').get_children():
+ current_filters.append(each.get_content())
+ k = self._diff(self.filters, current_filters)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ if self.paths_to_exclude:
+ if policy_obj.get_child_by_name('paths-to-exclude') is None:
+ return True
+            current_paths_to_exclude = []
+            for each in policy_obj.get_child_by_name('paths-to-exclude').get_children():
+                current_paths_to_exclude.append(each.get_content())
+            k = self._diff(self.paths_to_exclude, current_paths_to_exclude)
+ # If the diff returns something the lists don't match and the policy has changed
+ if k:
+ return True
+ return False
+
+ def _diff(self, li1, li2):
+ """
+ :param li1: list 1
+ :param li2: list 2
+        :return: a list containing items that are not in both lists
+ """
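+        # e.g. _diff(['exe', 'yml'], ['exe', 'py']) returns ['yml', 'py'] - the symmetric difference of the two lists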
+ li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]
+ return li_dif
+
+ def apply(self):
+ netapp_utils.ems_log_event("na_ontap_vscan_on_access_policy", self.server)
+ changed = False
+ policy_obj = self.return_on_access_policy()
+ if self.state == 'present':
+ if not self.exists_access_policy(policy_obj):
+ if not self.module.check_mode:
+ self.create_on_access_policy()
+ if self.policy_status:
+ self.status_modify_on_access_policy()
+ changed = True
+ else:
+ # Check if anything has changed first.
+ if self.has_policy_changed():
+ if not self.module.check_mode:
+ result = policy_obj.get_child_by_name('attributes-list').get_child_by_name('vscan-on-access-policy-info')
+ if str(self.policy_status).lower() != result.get_child_content('is-policy-enabled'):
+ if self.policy_status is not None:
+ self.status_modify_on_access_policy()
+ self.modify_on_access_policy()
+ changed = True
+ if self.state == 'absent':
+ if self.exists_access_policy(policy_obj):
+ if not self.module.check_mode:
+ result = policy_obj.get_child_by_name('attributes-list').get_child_by_name('vscan-on-access-policy-info')
+ if result.get_child_content('is-policy-enabled') == 'true':
+ self.status_modify_on_access_policy()
+ self.delete_on_access_policy()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanOnAccessPolicy()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py
new file mode 100644
index 00000000..80c4401f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_on_demand_task
+short_description: NetApp ONTAP Vscan on demand task configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure on demand task for Vscan
+options:
+ state:
+ description:
+ - Whether a Vscan on demand task is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ cross_junction:
+ description:
+ - Specifies whether the On-Demand task is allowed to cross volume junctions
+ type: bool
+ default: False
+
+ directory_recursion:
+ description:
+ - Specifies whether the On-Demand task is allowed to recursively scan through sub-directories.
+ type: bool
+ default: False
+
+ file_ext_to_exclude:
+ description:
+ - File-Extensions for which scanning must not be performed.
+    - A file whose extension matches both the inclusion and exclusion lists is not considered for scanning.
+ type: list
+ elements: str
+
+ file_ext_to_include:
+ description:
+ - File extensions for which scanning is considered.
+ - The default value is '*', which means that all files are considered for scanning except those which are excluded from scanning.
+    - A file whose extension matches both the inclusion and exclusion lists is not considered for scanning.
+ type: list
+ elements: str
+
+ max_file_size:
+ description:
+    - Maximum file size (in bytes) allowed for scanning. The default value of 10737418240 (10GB) is used if the option is not provided when creating a task.
+ type: str
+
+ paths_to_exclude:
+ description:
+ - File-paths for which scanning must not be performed.
+ type: list
+ elements: str
+
+ report_directory:
+ description:
+ - Path from the vserver root where task report is created. The path must be a directory and provided in unix-format from the root of the Vserver.
+ - Example /vol1/on-demand-reports.
+ type: str
+
+ report_log_level:
+ description:
+ - Log level for the On-Demand report.
+ choices: ['verbose', 'info', 'error']
+ type: str
+ default: error
+
+ request_timeout:
+ description:
+    - Total request-service time limit in seconds. If the virus scanner does not respond within the provided time, the scan times out.
+ type: str
+
+ scan_files_with_no_ext:
+ description:
+ - Specifies whether files without any extension are considered for scanning or not.
+ type: bool
+ default: True
+
+ scan_paths:
+ description:
+ - List of paths that need to be scanned. The path must be provided in unix-format and from the root of the Vserver.
+ - Example /vol1/large_files.
+ type: list
+ elements: str
+
+ scan_priority:
+ description:
+ - Priority of the On-Demand scan requests generated by this task.
+ choices: ['low', 'normal']
+ type: str
+ default: low
+
+ schedule:
+ description:
+ - Schedule of the task. The task will be run as per the schedule.
+ - For running the task immediately, vscan-on-demand-task-run api must be used after creating a task.
+ type: str
+
+ task_name:
+ description:
+ - Name of the task.
+ type: str
+ required: True
+'''
+
+
+EXAMPLES = """
+ - name: Create Vscan On Demand Task
+ na_ontap_vscan_on_demand_task:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ task_name: carchiOnDemand
+ scan_paths: /
+ report_directory: /
+ file_ext_to_exclude: ['py', 'yml']
+ max_file_size: 10737418241
+ paths_to_exclude: ['/tmp', '/var']
+ report_log_level: info
+ request_timeout: 60
+
+ - name: Delete Vscan On Demand Task
+ na_ontap_vscan_on_demand_task:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ task_name: carchiOnDemand
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanOnDemandTask(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ cross_junction=dict(required=False, type='bool', default=False),
+ directory_recursion=dict(required=False, type='bool', default=False),
+ file_ext_to_exclude=dict(required=False, type='list', elements='str'),
+ file_ext_to_include=dict(required=False, type='list', elements='str'),
+ max_file_size=dict(required=False, type="str"),
+ paths_to_exclude=dict(required=False, type='list', elements='str'),
+ report_directory=dict(required=False, type='str'),
+ report_log_level=dict(required=False, type='str', choices=['verbose', 'info', 'error'], default='error'),
+ request_timeout=dict(required=False, type='str'),
+ scan_files_with_no_ext=dict(required=False, type='bool', default=True),
+ scan_paths=dict(required=False, type='list', elements='str'),
+ scan_priority=dict(required=False, type='str', choices=['low', 'normal'], default='low'),
+ schedule=dict(required=False, type="str"),
+ task_name=dict(required=True, type="str")
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ["state", "present", ["report_directory", "scan_paths"]]
+ ]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_demand_task(self):
+ """
+ Get a demand task
+ :return: A vscan-on-demand-task-info or None
+ """
+ demand_task_iter = netapp_utils.zapi.NaElement("vscan-on-demand-task-get-iter")
+ demand_task_info = netapp_utils.zapi.NaElement("vscan-on-demand-task-info")
+ demand_task_info.add_new_child('task-name', self.parameters['task_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(demand_task_info)
+ demand_task_iter.add_child_elem(query)
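+        # The query restricts the iterator to the given task name; a matching
+        # entry, if any, is returned under attributes-list/vscan-on-demand-task-info.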
+ try:
+ result = self.server.invoke_successfully(demand_task_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching for Vscan on demand task %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return result.get_child_by_name('attributes-list').get_child_by_name('vscan-on-demand-task-info')
+ return None
+
+ def create_demand_task(self):
+ """
+ Create a Demand Task
+ :return: None
+ """
+ demand_task_obj = netapp_utils.zapi.NaElement("vscan-on-demand-task-create")
+ # Required items first
+ demand_task_obj.add_new_child('report-directory', self.parameters['report_directory'])
+ demand_task_obj.add_new_child('task-name', self.parameters['task_name'])
+ scan_paths = netapp_utils.zapi.NaElement("scan-paths")
+ for scan_path in self.parameters['scan_paths']:
+ scan_paths.add_new_child('string', scan_path)
+ demand_task_obj.add_child_elem(scan_paths)
+ # Optional items next
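+        # Booleans are passed to ZAPI as the lowercase strings 'true'/'false',
+        # and list options are wrapped in their ZAPI container elements.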
+ if self.parameters.get('cross_junction'):
+ demand_task_obj.add_new_child('cross-junction', str(self.parameters['cross_junction']).lower())
+ if self.parameters.get('directory_recursion'):
+ demand_task_obj.add_new_child('directory-recursion', str(self.parameters['directory_recursion']).lower())
+ if self.parameters.get('file_ext_to_exclude'):
+ ext_to_exclude_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude')
+ for exclude_file in self.parameters['file_ext_to_exclude']:
+ ext_to_exclude_obj.add_new_child('file-extension', exclude_file)
+ demand_task_obj.add_child_elem(ext_to_exclude_obj)
+ if self.parameters.get('file_ext_to_include'):
+ ext_to_include_obj = netapp_utils.zapi.NaElement('file-ext-to-include')
+            for include_file in self.parameters['file_ext_to_include']:
+                ext_to_include_obj.add_new_child('file-extension', include_file)
+ demand_task_obj.add_child_elem(ext_to_include_obj)
+ if self.parameters.get('max_file_size'):
+ demand_task_obj.add_new_child('max-file-size', self.parameters['max_file_size'])
+ if self.parameters.get('paths_to_exclude'):
+ exclude_paths = netapp_utils.zapi.NaElement('paths-to-exclude')
+ for path in self.parameters['paths_to_exclude']:
+ exclude_paths.add_new_child('string', path)
+ demand_task_obj.add_child_elem(exclude_paths)
+ if self.parameters.get('report_log_level'):
+ demand_task_obj.add_new_child('report-log-level', self.parameters['report_log_level'])
+ if self.parameters.get('request_timeout'):
+ demand_task_obj.add_new_child('request-timeout', self.parameters['request_timeout'])
+ if self.parameters.get('scan_files_with_no_ext'):
+ demand_task_obj.add_new_child('scan-files-with-no-ext', str(self.parameters['scan_files_with_no_ext']).lower())
+ if self.parameters.get('scan_priority'):
+ demand_task_obj.add_new_child('scan-priority', self.parameters['scan_priority'].lower())
+ if self.parameters.get('schedule'):
+ demand_task_obj.add_new_child('schedule', self.parameters['schedule'])
+ try:
+ self.server.invoke_successfully(demand_task_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating on demand task %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_demand_task(self):
+ """
+        Delete a Demand Task
+        :return: None
+ """
+ demand_task_obj = netapp_utils.zapi.NaElement('vscan-on-demand-task-delete')
+ demand_task_obj.add_new_child('task-name', self.parameters['task_name'])
+ try:
+ self.server.invoke_successfully(demand_task_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting on demand task, %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_vscan_on_demand_task")
+ current = self.get_demand_task()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_demand_task()
+ elif cd_action == 'delete':
+ self.delete_demand_task()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanOnDemandTask()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py
new file mode 100644
index 00000000..03919e68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_vscan_scanner_pool
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_scanner_pool
+short_description: NetApp ONTAP Vscan Scanner Pools Configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete a Vscan Scanner Pool
+options:
+ state:
+ description:
+ - Whether a Vscan Scanner pool is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ hostnames:
+ description:
+ - List of hostnames of Vscan servers which are allowed to connect to Data ONTAP
+ type: list
+ elements: str
+
+ privileged_users:
+ description:
+ - List of privileged usernames. Username must be in the form "domain-name\\user-name"
+ type: list
+ elements: str
+
+ scanner_pool:
+ description:
+ - the name of the virus scanner pool
+ required: true
+ type: str
+
+ scanner_policy:
+ description:
+ - The name of the Virus scanner Policy
+ choices: ['primary', 'secondary', 'idle']
+ type: str
+'''
+
+EXAMPLES = """
+- name: Create and enable Scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ hostnames: ['name', 'name2']
+ privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi']
+ scanner_pool: Scanner1
+ scanner_policy: primary
+
+- name: Modify scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ hostnames: ['name', 'name2', 'name3']
+ privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi', 'sim.rtp.openeng.netapp.com\\chuyic']
+ scanner_pool: Scanner1
+
+- name: Delete a scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ scanner_pool: Scanner1
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanScannerPool(object):
+ ''' create, modify, delete vscan scanner pool '''
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ hostnames=dict(required=False, type='list', elements='str'),
+ privileged_users=dict(required=False, type='list', elements='str'),
+ scanner_pool=dict(required=True, type='str'),
+ scanner_policy=dict(required=False, type='str', choices=['primary', 'secondary', 'idle'])
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def create_scanner_pool(self):
+ """
+ Create a Vscan Scanner Pool
+ :return: nothing
+ """
+ scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create')
+        if self.parameters.get('hostnames'):
+ string_obj = netapp_utils.zapi.NaElement('hostnames')
+ scanner_pool_obj.add_child_elem(string_obj)
+ for hostname in self.parameters['hostnames']:
+ string_obj.add_new_child('string', hostname)
+        if self.parameters.get('privileged_users'):
+ users_obj = netapp_utils.zapi.NaElement('privileged-users')
+ scanner_pool_obj.add_child_elem(users_obj)
+ for user in self.parameters['privileged_users']:
+ users_obj.add_new_child('privileged-user', user)
+ scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ try:
+ self.server.invoke_successfully(scanner_pool_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' %
+                                      (self.parameters['scanner_pool'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply_policy(self):
+ """
+ Apply a Scanner policy to a Scanner pool
+ :return: nothing
+ """
+ apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy')
+ apply_policy_obj.add_new_child('scanner-policy', self.parameters['scanner_policy'])
+ apply_policy_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ try:
+ self.server.invoke_successfully(apply_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error applying policy %s to pool %s: %s' %
+                                      (self.parameters['scanner_policy'], self.parameters['scanner_pool'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_scanner_pool(self):
+ """
+        Check to see if a scanner pool exists or not
+        :return: a dict of scanner pool details if it exists, None otherwise
+ """
+ return_value = None
+ if self.use_rest:
+ pass
+ else:
+ scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter')
+ scanner_pool_info = netapp_utils.zapi.NaElement('vscan-scanner-pool-info')
+ scanner_pool_info.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ scanner_pool_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(scanner_pool_info)
+ scanner_pool_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(scanner_pool_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' %
+ (self.parameters['scanner_pool'], to_native(error)), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content(
+ 'scanner-pool') == self.parameters['scanner_pool']:
+ scanner_pool_obj = result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info')
+ hostname = [host.get_content() for host in
+ scanner_pool_obj.get_child_by_name('hostnames').get_children()]
+ privileged_users = [user.get_content() for user in
+ scanner_pool_obj.get_child_by_name('privileged-users').get_children()]
+ return_value = {
+ 'hostnames': hostname,
+ 'enable': scanner_pool_obj.get_child_content('is-currently-active'),
+ 'privileged_users': privileged_users,
+ 'scanner_pool': scanner_pool_obj.get_child_content('scanner-pool'),
+ 'scanner_policy': scanner_pool_obj.get_child_content('scanner-policy')
+ }
+ return return_value
+
+ def delete_scanner_pool(self):
+ """
+ Delete a Scanner pool
+ :return: nothing
+ """
+ scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete')
+ scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ try:
+ self.server.invoke_successfully(scanner_pool_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' %
+ (self.parameters['scanner_pool'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_scanner_pool(self, modify):
+ """
+ Modify a scanner pool
+ :return: nothing
+ """
+ vscan_pool_modify = netapp_utils.zapi.NaElement('vscan-scanner-pool-modify')
+ vscan_pool_modify.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ for key in modify:
+ if key == 'privileged_users':
+ users_obj = netapp_utils.zapi.NaElement('privileged-users')
+ vscan_pool_modify.add_child_elem(users_obj)
+ for user in modify['privileged_users']:
+ users_obj.add_new_child('privileged-user', user)
+ elif key == 'hostnames':
+ string_obj = netapp_utils.zapi.NaElement('hostnames')
+ vscan_pool_modify.add_child_elem(string_obj)
+ for hostname in modify['hostnames']:
+ string_obj.add_new_child('string', hostname)
+ elif key != 'scanner_policy':
+ vscan_pool_modify.add_new_child(self.attribute_to_name(key), str(modify[key]))
+
+ try:
+ self.server.invoke_successfully(vscan_pool_modify, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying Vscan Scanner Pool %s: %s' %
+ (self.parameters['scanner_pool'], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
+        return attribute.replace('_', '-')
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ if self.use_rest:
+ # TODO: logging for Rest
+ return
+ else:
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ self.asup_log_for_cserver("na_ontap_vscan_scanner_pool")
+ scanner_pool_obj = self.get_scanner_pool()
+ cd_action = self.na_helper.get_cd_action(scanner_pool_obj, self.parameters)
+ if self.parameters['state'] == 'present' and cd_action is None:
+ # TODO We need to update the module to support modify
+ modify = self.na_helper.get_modified_attributes(scanner_pool_obj, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_scanner_pool()
+ if self.parameters.get('scanner_policy') is not None:
+ self.apply_policy()
+ elif cd_action == 'delete':
+ self.delete_scanner_pool()
+ elif modify:
+ self.modify_scanner_pool(modify)
+ if self.parameters.get('scanner_policy') is not None:
+ self.apply_policy()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanScannerPool()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py
new file mode 100644
index 00000000..b7e3c67c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_vserver_cifs_security
+short_description: NetApp ONTAP vserver CIFS security modification
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - modify vserver CIFS security.
+
+options:
+
+ vserver:
+ description:
+ - name of the vserver.
+ required: true
+ type: str
+
+ kerberos_clock_skew:
+ description:
+ - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock.
+ type: int
+
+ kerberos_ticket_age:
+ description:
+ - Determine the maximum amount of time in hours that a user's ticket may be used for the purpose of Kerberos authentication.
+ type: int
+
+ kerberos_renew_age:
+ description:
+ - Determine the maximum amount of time in days for which a ticket can be renewed.
+ type: int
+
+ kerberos_kdc_timeout:
+ description:
+ - Determine the timeout value in seconds for KDC connections.
+ type: int
+
+ is_signing_required:
+ description:
+ - Determine whether signing is required for incoming CIFS traffic.
+ type: bool
+
+ is_password_complexity_required:
+ description:
+ - Determine whether password complexity is required for local users.
+ type: bool
+
+ is_aes_encryption_enabled:
+ description:
+ - Determine whether AES-128 and AES-256 encryption mechanisms are enabled for Kerberos-related CIFS communication.
+ type: bool
+
+ is_smb_encryption_required:
+ description:
+ - Determine whether SMB encryption is required for incoming CIFS traffic.
+ type: bool
+
+ lm_compatibility_level:
+ description:
+ - Determine the LM compatibility level.
+ choices: ['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']
+ type: str
+
+ referral_enabled_for_ad_ldap:
+ description:
+ - Determine whether LDAP referral chasing is enabled or not for AD LDAP connections.
+ type: bool
+
+ session_security_for_ad_ldap:
+ description:
+ - Determine the level of security required for LDAP communications.
+ choices: ['none', 'sign', 'seal']
+ type: str
+
+ smb1_enabled_for_dc_connections:
+ description:
+ - Determine if SMB version 1 is used for connections to domain controllers.
+ choices: ['false', 'true', 'system_default']
+ type: str
+
+ smb2_enabled_for_dc_connections:
+ description:
+ - Determine if SMB version 2 is used for connections to domain controllers.
+ choices: ['false', 'true', 'system_default']
+ type: str
+
+ use_start_tls_for_ad_ldap:
+ description:
+ - Determine whether to use start_tls for AD LDAP connections.
+ type: bool
+
+'''
+
+EXAMPLES = '''
+ - name: modify cifs security
+ na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ is_aes_encryption_enabled: false
+ lm_compatibility_level: lm_ntlm_ntlmv2_krb
+ smb1_enabled_for_dc_connections: system_default
+ smb2_enabled_for_dc_connections: system_default
+ use_start_tls_for_ad_ldap: false
+ referral_enabled_for_ad_ldap: false
+ session_security_for_ad_ldap: none
+ is_signing_required: false
+ is_password_complexity_required: false
+
+ - name: modify cifs security is_smb_encryption_required
+ na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ is_smb_encryption_required: false
+
+ - name: modify cifs security int options
+ na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ kerberos_clock_skew: 10
+ kerberos_ticket_age: 10
+ kerberos_renew_age: 5
+ kerberos_kdc_timeout: 3
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCifsSecurity(object):
+ '''
+ modify vserver cifs security
+ '''
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ vserver=dict(required=True, type='str'),
+ kerberos_clock_skew=dict(required=False, type='int'),
+ kerberos_ticket_age=dict(required=False, type='int'),
+ kerberos_renew_age=dict(required=False, type='int'),
+ kerberos_kdc_timeout=dict(required=False, type='int'),
+ is_signing_required=dict(required=False, type='bool'),
+ is_password_complexity_required=dict(required=False, type='bool'),
+ is_aes_encryption_enabled=dict(required=False, type='bool'),
+ is_smb_encryption_required=dict(required=False, type='bool'),
+ lm_compatibility_level=dict(required=False, choices=['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']),
+ referral_enabled_for_ad_ldap=dict(required=False, type='bool'),
+ session_security_for_ad_ldap=dict(required=False, choices=['none', 'sign', 'seal']),
+ smb1_enabled_for_dc_connections=dict(required=False, choices=['false', 'true', 'system_default']),
+ smb2_enabled_for_dc_connections=dict(required=False, choices=['false', 'true', 'system_default']),
+ use_start_tls_for_ad_ldap=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.set_playbook_zapi_key_map()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def set_playbook_zapi_key_map(self):
+
+ self.na_helper.zapi_int_keys = {
+ 'kerberos_clock_skew': 'kerberos-clock-skew',
+ 'kerberos_ticket_age': 'kerberos-ticket-age',
+ 'kerberos_renew_age': 'kerberos-renew-age',
+ 'kerberos_kdc_timeout': 'kerberos-kdc-timeout'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'is_signing_required': 'is-signing-required',
+ 'is_password_complexity_required': 'is-password-complexity-required',
+ 'is_aes_encryption_enabled': 'is-aes-encryption-enabled',
+ 'is_smb_encryption_required': 'is-smb-encryption-required',
+ 'referral_enabled_for_ad_ldap': 'referral-enabled-for-ad-ldap',
+ 'use_start_tls_for_ad_ldap': 'use-start-tls-for-ad-ldap'
+ }
+ self.na_helper.zapi_str_keys = {
+ 'lm_compatibility_level': 'lm-compatibility-level',
+ 'session_security_for_ad_ldap': 'session-security-for-ad-ldap',
+ 'smb1_enabled_for_dc_connections': 'smb1-enabled-for-dc-connections',
+ 'smb2_enabled_for_dc_connections': 'smb2-enabled-for-dc-connections'
+ }
+
+ def cifs_security_get_iter(self):
+ """
+ get current vserver cifs security.
+ :return: a dict of vserver cifs security
+ """
+ cifs_security_get = netapp_utils.zapi.NaElement('cifs-security-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ cifs_security = netapp_utils.zapi.NaElement('cifs-security')
+ cifs_security.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(cifs_security)
+ cifs_security_get.add_child_elem(query)
+ cifs_security_details = dict()
+ try:
+ result = self.server.invoke_successfully(cifs_security_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching cifs security from %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ cifs_security_info = result.get_child_by_name('attributes-list').get_child_by_name('cifs-security')
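+            # Convert ZAPI string values back to playbook types (int/bool/str)
+            # using the key maps above, so they compare cleanly with the parameters.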
+ for option, zapi_key in self.na_helper.zapi_int_keys.items():
+ cifs_security_details[option] = self.na_helper.get_value_for_int(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+ cifs_security_details[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_str_keys.items():
+ if cifs_security_info.get_child_content(zapi_key) is None:
+ cifs_security_details[option] = None
+ else:
+ cifs_security_details[option] = cifs_security_info.get_child_content(zapi_key)
+ return cifs_security_details
+ return None
+
+ def cifs_security_modify(self, modify):
+ """
+        :param modify: a dict of attributes to modify
+ :return: None
+ """
+ cifs_security_modify = netapp_utils.zapi.NaElement('cifs-security-modify')
+ for attribute in modify:
+ cifs_security_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
+ try:
+ self.server.invoke_successfully(cifs_security_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error modifying cifs security on %s: %s'
+ % (self.parameters['vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
+        return attribute.replace('_', '-')
+
+ def apply(self):
+ """Call modify operations."""
+ self.asup_log_for_cserver("na_ontap_vserver_cifs_security")
+ current = self.cifs_security_get_iter()
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if modify:
+ self.cifs_security_modify(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+
+def main():
+ obj = NetAppONTAPCifsSecurity()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py
new file mode 100644
index 00000000..90a4c077
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete vserver peer
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_vserver_peer
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - Whether the specified vserver peer should exist or not.
+ default: present
+ vserver:
+ description:
+ - Specifies name of the source Vserver in the relationship.
+ required: true
+ type: str
+ applications:
+ type: list
+ elements: str
+ description:
+ - List of applications which can make use of the peering relationship.
+ - FlexCache supported from ONTAP 9.5 onwards.
+ peer_vserver:
+ description:
+ - Specifies name of the peer Vserver in the relationship.
+ required: true
+ type: str
+ peer_cluster:
+ description:
+ - Specifies name of the peer Cluster.
+ - Required for creating the vserver peer relationship with a remote cluster
+ type: str
+ dest_hostname:
+ description:
+ - Destination hostname or IP address.
+ - Required for creating the vserver peer relationship with a remote cluster
+ type: str
+ dest_username:
+ description:
+ - Destination username.
+      - Optional if this is the same as the source username.
+ type: str
+ dest_password:
+ description:
+ - Destination password.
+      - Optional if this is the same as the source password.
+ type: str
+short_description: NetApp ONTAP Vserver peering
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: Source vserver peer create
+ na_ontap_vserver_peer:
+ state: present
+ peer_vserver: ansible2
+ peer_cluster: ansibleCluster
+ vserver: ansible
+ applications: ['snapmirror']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ dest_hostname: "{{ netapp_dest_hostname }}"
+
+ - name: vserver peer delete
+ na_ontap_vserver_peer:
+ state: absent
+ peer_vserver: ansible2
+ vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPVserverPeer(object):
+ """
+ Class with vserver peer methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ peer_vserver=dict(required=True, type='str'),
+ peer_cluster=dict(required=False, type='str'),
+ applications=dict(required=False, type='list', elements='str'),
+ dest_hostname=dict(required=False, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ if self.parameters.get('dest_hostname'):
+ self.module.params['hostname'] = self.parameters['dest_hostname']
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # reset to source host connection for asup logs
+ self.module.params['hostname'] = self.parameters['hostname']
+ self.module.params['username'] = self.parameters['username']
+ self.module.params['password'] = self.parameters['password']
+
+ def vserver_peer_get_iter(self):
+ """
+ Compose NaElement object to query current vserver using peer-vserver and vserver parameters
+ :return: NaElement object for vserver-get-iter with query
+ """
+ vserver_peer_get = netapp_utils.zapi.NaElement('vserver-peer-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ vserver_peer_info = netapp_utils.zapi.NaElement('vserver-peer-info')
+ vserver_peer_info.add_new_child('peer-vserver', self.parameters['peer_vserver'])
+ vserver_peer_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(vserver_peer_info)
+ vserver_peer_get.add_child_elem(query)
+ return vserver_peer_get
+
+ def vserver_peer_get(self):
+ """
+ Get current vserver peer info
+ :return: Dictionary of current vserver peer details if query successful, else return None
+ """
+ vserver_peer_get_iter = self.vserver_peer_get_iter()
+ vserver_info = dict()
+ try:
+ result = self.server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching vserver peer %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+ # return vserver peer details
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ vserver_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info')
+ vserver_info['peer_vserver'] = vserver_peer_info.get_child_content('peer-vserver')
+ vserver_info['vserver'] = vserver_peer_info.get_child_content('vserver')
+ vserver_info['peer_state'] = vserver_peer_info.get_child_content('peer-state')
+ return vserver_info
+ return None
+
+ def vserver_peer_delete(self):
+ """
+ Delete a vserver peer
+ """
+ vserver_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-peer-delete', **{'peer-vserver': self.parameters['peer_vserver'],
+ 'vserver': self.parameters['vserver']})
+ try:
+ self.server.invoke_successfully(vserver_peer_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting vserver peer %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_peer_cluster_name(self):
+ """
+ Get local cluster name
+ :return: cluster name
+ """
+ cluster_info = netapp_utils.zapi.NaElement('cluster-identity-get')
+ try:
+ result = self.server.invoke_successfully(cluster_info, enable_tunneling=True)
+ return result.get_child_by_name('attributes').get_child_by_name(
+ 'cluster-identity-info').get_child_content('cluster-name')
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
+ % (self.parameters['peer_vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def vserver_peer_create(self):
+ """
+ Create a vserver peer
+ """
+ if self.parameters.get('applications') is None:
+ self.module.fail_json(msg='applications parameter is missing')
+ if self.parameters.get('peer_cluster') is not None and self.parameters.get('dest_hostname') is None:
+ self.module.fail_json(msg='dest_hostname is required for peering a vserver in remote cluster')
+ if self.parameters.get('peer_cluster') is None:
+ self.parameters['peer_cluster'] = self.get_peer_cluster_name()
+ vserver_peer_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-peer-create', **{'peer-vserver': self.parameters['peer_vserver'],
+ 'vserver': self.parameters['vserver'],
+ 'peer-cluster': self.parameters['peer_cluster']})
+ applications = netapp_utils.zapi.NaElement('applications')
+ for application in self.parameters['applications']:
+ applications.add_new_child('vserver-peer-application', application)
+ vserver_peer_create.add_child_elem(applications)
+ try:
+ self.server.invoke_successfully(vserver_peer_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating vserver peer %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def is_remote_peer(self):
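+        # The peer is remote only when dest_hostname is provided and differs
+        # from the source hostname; otherwise no accept step is needed.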
+ if self.parameters.get('dest_hostname') is None or \
+ (self.parameters['dest_hostname'] == self.parameters['hostname']):
+ return False
+ return True
+
+ def vserver_peer_accept(self):
+ """
+ Accept a vserver peer at destination
+ """
+ # peer-vserver -> remote (source vserver is provided)
+ # vserver -> local (destination vserver is provided)
+ vserver_peer_accept = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-peer-accept', **{'peer-vserver': self.parameters['vserver'],
+ 'vserver': self.parameters['peer_vserver']})
+ try:
+ self.dest_server.invoke_successfully(vserver_peer_accept, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error accepting vserver peer %s: %s'
+ % (self.parameters['peer_vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event(event_name, cserver)
+
+ def apply(self):
+ """
+ Apply action to create/delete or accept vserver peer
+ """
+ self.asup_log_for_cserver("na_ontap_vserver_peer")
+ current = self.vserver_peer_get()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.vserver_peer_create()
+ # accept only if the peer relationship is on a remote cluster
+ if self.is_remote_peer():
+ self.vserver_peer_accept()
+ elif cd_action == 'delete':
+ self.vserver_peer_delete()
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPVserverPeer()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py
new file mode 100644
index 00000000..c98c24f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Loop over an ONTAP get status request until a condition is satisfied.
+ - Report a timeout error if C(timeout) is exceeded while waiting for the condition.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_wait_for_condition
+short_description: NetApp ONTAP wait_for_condition. Loop over a get status request until a condition is met.
+version_added: 20.8.0
+options:
+ name:
+ description:
+ - The name of the event to check for.
+ choices: ['sp_upgrade', 'sp_version']
+ type: str
+ required: true
+ state:
+ description:
+ - whether the conditions should be present or absent.
+ - if C(present), the module exits when any of the conditions is observed.
+    - if C(absent), the module exits with success when none of the conditions is observed.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ conditions:
+ description:
+ - one or more conditions to match
+ - for instance C(is_in_progress) for C(sp_upgrade), C(firmware_version) for C(sp_version).
+ type: list
+ elements: str
+ required: true
+ polling_interval:
+ description:
+    - how often to check for the conditions, in seconds.
+ default: 5
+ type: int
+ timeout:
+ description:
+ - how long to wait for the conditions, in seconds.
+ default: 180
+ type: int
+ attributes:
+ description:
+ - a dictionary of custom attributes for the event.
+ - for instance, C(sp_upgrade), C(sp_version) require C(node).
+    - C(sp_version) requires C(expected_version).
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: wait for sp_upgrade in progress
+ na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_upgrade
+ conditions: is_in_progress
+ attributes:
+ node: "{{ node }}"
+ polling_interval: 30
+ timeout: 1800
+
+ - name: wait for sp_upgrade not in progress
+ na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_upgrade
+ conditions: is_in_progress
+ state: absent
+ attributes:
+ node: "{{ ontap_admin_ip }}"
+ polling_interval: 30
+ timeout: 1800
+
+ - name: wait for sp_version to match 3.9
+ na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_version
+ conditions: firmware_version
+ state: present
+ attributes:
+ node: "{{ ontap_admin_ip }}"
+ expected_version: 3.9
+ polling_interval: 30
+ timeout: 1800
+"""
+
+RETURN = """
+states:
+ description:
+ - summarized list of observed states while waiting for completion
+ - reported for success or timeout error
+ returned: always
+ type: str
+last_state:
+ description: last observed state for event
+ returned: always
+ type: str
+"""
+
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPWFC(object):
+ ''' wait for a resource to match a condition or not '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', choices=['sp_upgrade', 'sp_version']),
+ conditions=dict(required=True, type='list', elements='str'),
+ polling_interval=dict(required=False, type='int', default=5),
+ timeout=dict(required=False, type='int', default=180),
+ attributes=dict(required=False, type='dict')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('name', 'sp_upgrade', ['attributes']),
+ ('name', 'sp_version', ['attributes']),
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.states = list()
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
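+        # Each supported resource maps its condition names to a tuple of
+        # (ZAPI element name, expected value) consumed by extract_condition().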
+ self.resource_configuration = dict(
+ sp_upgrade=dict(
+ required_attributes=['node'],
+ conditions=dict(
+ is_in_progress=('is-in-progress', "true")
+ )
+ ),
+ sp_version=dict(
+ required_attributes=['node', 'expected_version'],
+ conditions=dict(
+ firmware_version=('firmware-version', self.parameters['attributes'].get('expected_version'))
+ )
+ )
+ )
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+        Create an Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ try:
+ netapp_utils.ems_log_event(event_name, cserver)
+ except netapp_utils.zapi.NaApiError:
+ pass
+
+ def get_key_value(self, xml, key):
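+        # Depth-first search: return the content of the first element named
+        # 'key' found anywhere under 'xml', or None if no such element exists.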
+ for child in xml.get_children():
+ value = xml.get_child_content(key)
+ if value is not None:
+ return value
+ value = self.get_key_value(child, key)
+ if value is not None:
+ return value
+ return None
+
+ def build_zapi(self, name):
+ ''' build ZAPI request based on resource name '''
+ if name == 'sp_upgrade':
+ zapi_obj = netapp_utils.zapi.NaElement("service-processor-image-update-progress-get")
+ zapi_obj.add_new_child('node', self.parameters['attributes']['node'])
+ return zapi_obj
+ if name == 'sp_version':
+ zapi_obj = netapp_utils.zapi.NaElement("service-processor-get")
+ zapi_obj.add_new_child('node', self.parameters['attributes']['node'])
+ return zapi_obj
+ raise KeyError(name)
+
+ def extract_condition(self, name, results):
+ ''' check if any of the conditions is present
+ return:
+ None, error if key is not found
+ condition, None if a key is found with expected value
+ None, None if every key does not match the expected values
+ '''
+ error = None
+ for condition, (key, value) in self.resource_configuration[name]['conditions'].items():
+ status = self.get_key_value(results, key)
+ self.states.append(str(status))
+ if status == str(value):
+ return condition, error
+ if status is None:
+ error = 'Cannot find element with name: %s in results: %s' % (key, results.to_string())
+ return None, error
+ # not found, or no match
+ return None, None
+
+ def get_condition(self, name, zapi_obj):
+ ''' calls the ZAPI and extract condition value'''
+ try:
+ results = self.server.invoke_successfully(zapi_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ error = 'Error running command %s: %s' % (self.parameters['name'], to_native(error))
+ return None, error
+
+ condition, error = self.extract_condition(name, results)
+ if error is not None:
+ self.module.fail_json(msg='Error: %s' % error)
+ if self.parameters['state'] == 'present':
+ if condition in self.parameters['conditions']:
+ return 'matched condition: %s' % condition, None
+ else:
+ if condition is None:
+ return 'conditions not matched', None
+ if condition not in self.parameters['conditions']:
+ return 'conditions not matched: found other condition: %s' % condition, None
+ return None, None
+
+ def summarize_states(self):
+ ''' replaces a long list of states with multipliers
+            e.g. 'false*5'
+ return:
+ state_list as str
+ last_state
+ '''
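+        # For example, states ['false', 'false', 'false', 'true'] is summarized
+        # as 'false*3true' with last_state 'true'.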
+ previous_state = None
+ count = 0
+ summary = ''
+ for state in self.states:
+ if state == previous_state:
+ count += 1
+ else:
+ if previous_state is not None:
+ summary += '%s%s' % (previous_state, '' if count == 1 else '*%d' % count)
+ count = 1
+ previous_state = state
+ if previous_state is not None:
+ summary += '%s%s' % (previous_state, '' if count == 1 else '*%d' % count)
+ last_state = self.states[-1] if self.states else ''
+ return summary, last_state
+
+ def wait_for_condition(self, name):
+ ''' calls the ZAPI and extract condition value - loop until found '''
+ time_left = self.parameters['timeout']
+ max_consecutive_error_count = 3
+ error_count = 0
+ zapi_obj = self.build_zapi(name)
+
+ while time_left > 0:
+ condition, error = self.get_condition(name, zapi_obj)
+ if error is not None:
+ error_count += 1
+ if error_count >= max_consecutive_error_count:
+ self.module.fail_json(msg='Error: %s - count: %d' % (error, error_count))
+ elif condition is not None:
+ return condition
+ time.sleep(self.parameters['polling_interval'])
+ time_left -= self.parameters['polling_interval']
+
+ error = 'Error: timeout waiting for condition%s: %s.' %\
+ ('s' if len(self.parameters['conditions']) > 1 else '',
+ ', '.join(self.parameters['conditions']))
+ states, last_state = self.summarize_states()
+ self.module.fail_json(msg=error, states=states, last_state=last_state)
+
+ def validate_resource(self, name):
+ if name not in self.resource_configuration:
+ raise KeyError('%s - configuration entry missing for resource' % name)
+
+ def validate_attributes(self, name):
+ required = self.resource_configuration[name].get('required_attributes', list())
+ msgs = list()
+ for attribute in required:
+ if attribute not in self.parameters['attributes']:
+ msgs.append('attributes: %s is required for resource name: %s' % (attribute, name))
+ if msgs:
+ self.module.fail_json(msg='Error: %s' % ', '.join(msgs))
+
+ def validate_conditions(self, name):
+ conditions = self.resource_configuration[name].get('conditions')
+ msgs = list()
+ for condition in self.parameters['conditions']:
+ if condition not in conditions:
+ msgs.append('condition: %s is not valid for resource name: %s' % (condition, name))
+ if msgs:
+ msgs.append('valid condition%s: %s' %
+ ('s are' if len(conditions) > 1 else ' is', ', '.join(conditions.keys())))
+ self.module.fail_json(msg='Error: %s' % ', '.join(msgs))
+
+ def apply(self):
+ ''' calls the ZAPI and check conditions '''
+ changed = False
+ self.asup_log_for_cserver("na_ontap_wait_for_condition: %s " % self.parameters['name'])
+ name = self.parameters['name']
+ self.validate_resource(name)
+ self.validate_attributes(name)
+ self.validate_conditions(name)
+ output = self.wait_for_condition(name)
+ states, last_state = self.summarize_states()
+ self.module.exit_json(changed=changed, msg=output, states=states, last_state=last_state)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPWFC()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py
new file mode 100644
index 00000000..78b097dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_wwpn_alias
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_wwpn_alias
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP set FCP WWPN Alias
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Create/Delete FCP WWPN Alias
+
+options:
+ state:
+ description:
+ - Whether the specified alias should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - The name of the alias to create or delete.
+ required: true
+ type: str
+
+ wwpn:
+ description:
+ - WWPN of the alias.
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create FCP Alias
+ na_ontap_wwpn_alias:
+ state: present
+ name: alias1
+ wwpn: 01:02:03:04:0a:0b:0c:0d
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete FCP Alias
+ na_ontap_wwpn_alias:
+ state: absent
+ name: alias1
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+class NetAppOntapWwpnAlias(object):
+ ''' ONTAP WWPN alias operations '''
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=[
+ 'present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ wwpn=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[('state', 'present', ['wwpn'])],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # REST API should be used for ONTAP 9.6 or higher.
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_wwpn_alias'))
+
+ def get_alias(self, uuid):
+ params = {'fields': 'alias,wwpn',
+ 'alias': self.parameters['name'],
+ 'svm.uuid': uuid}
+ api = 'network/fc/wwpn-aliases'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching wwpn alias: %s" % error)
+ if message['num_records'] > 0:
+ return {'name': message['records'][0]['alias'],
+ 'wwpn': message['records'][0]['wwpn'],
+ }
+ else:
+ return None
+
+ def create_alias(self, uuid, is_modify=False):
+ params = {'alias': self.parameters['name'],
+ 'wwpn': self.parameters['wwpn'],
+ 'svm.uuid': uuid}
+ api = 'network/fc/wwpn-aliases'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ if is_modify:
+ self.module.fail_json(msg="Error on modifying wwpn alias when trying to re-create alias: %s." % error)
+ else:
+ self.module.fail_json(msg="Error on creating wwpn alias: %s." % error)
+
+ def delete_alias(self, uuid, is_modify=False):
+ api = 'network/fc/wwpn-aliases/%s/%s' % (uuid, self.parameters['name'])
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ if is_modify:
+ self.module.fail_json(msg="Error on modifying wwpn alias when trying to delete alias: %s." % error)
+ else:
+ self.module.fail_json(msg="Error on deleting wwpn alias: %s." % error)
+
+ def get_svm_uuid(self):
+ """
+ Get a svm's UUID
+ :return: uuid of the svm.
+ """
+ params = {'fields': 'uuid', 'name': self.parameters['vserver']}
+ api = "svm/svms"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
+        if not message.get('records'):
+            self.module.fail_json(msg="Error: vserver %s not found." % self.parameters['vserver'])
+        return message['records'][0]['uuid']
+
+ def apply(self):
+ cd_action, uuid, modify = None, None, None
+ uuid = self.get_svm_uuid()
+ current = self.get_alias(uuid)
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_alias(uuid)
+ elif cd_action == 'delete':
+ self.delete_alias(uuid)
+ elif modify:
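+                    # a wwpn change is applied by deleting the existing alias and re-creating it with the new wwpn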
+ self.delete_alias(uuid, is_modify=True)
+ self.create_alias(uuid, is_modify=True)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ alias = NetAppOntapWwpnAlias()
+ alias.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py
new file mode 100644
index 00000000..29b9de70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Call a ZAPI on ONTAP.
+ - Cluster ZAPIs are run using a cluster admin account.
+  - Vserver ZAPIs can be run using a vsadmin account or using vserver tunneling (cluster admin with the I(vserver) option).
+ - In case of success, a json dictionary is returned as C(response).
+ - In case of a ZAPI error, C(status), C(errno), C(reason) are set to help with diagnosing the issue,
+ - and the call is reported as an error ('failed').
+  - Other errors (e.g. connection issues) are reported as an Ansible error.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_zapit
+short_description: NetApp ONTAP Run any ZAPI on ONTAP
+version_added: "20.4.0"
+options:
+ zapi:
+ description:
+ - A dictionary for the zapi and arguments.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - A single zapi can be called at a time. Ansible warns if duplicate keys are found and only uses the last entry.
+ required: true
+ type: dict
+ vserver:
+ description:
+      - If provided, forces vserver tunneling. The username identifies a cluster admin account.
+ type: str
+'''
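+
+# Illustration (not part of the module logic): the C(zapi) dictionary is a direct
+# representation of the ZAPI XML. For example, the task input
+#   zapi:
+#     vserver-get-iter:
+#       query:
+#         vserver-info:
+#           vserver-name: trident_svm
+# is sent to ONTAP roughly as
+#   <vserver-get-iter>
+#     <query>
+#       <vserver-info><vserver-name>trident_svm</vserver-name></vserver-info>
+#     </query>
+#   </vserver-get-iter>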
+
+EXAMPLES = """
+-
+ name: Ontap ZAPI
+ hosts: localhost
+ gather_facts: False
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ admin_ip }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ https: true
+ validate_certs: false
+ svm_login: &svm_login
+ hostname: "{{ svm_admin_ip }}"
+ username: "{{ svm_admin_username }}"
+ password: "{{ svm_admin_password }}"
+ https: true
+ validate_certs: false
+
+ tasks:
+ - name: run ontap ZAPI command as cluster admin
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ system-get-version:
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as cluster admin
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ vserver-get-iter:
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as cluster admin
+ na_ontap_zapit:
+ <<: *login
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - aggr-list:
+ - aggr-name
+ - allowed-protocols:
+ - protocols
+ - vserver-aggr-info-list:
+              - vserver-aggr-info
+ - uuid
+ query:
+ vserver-info:
+ vserver-name: trident_svm
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as vsadmin
+ na_ontap_zapit:
+ <<: *svm_login
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - uuid
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as vserver tunneling
+ na_ontap_zapit:
+ <<: *login
+ vserver: trident_svm
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - uuid
+ register: output
+ - debug: var=output
+
+ - name: run ontap active-directory ZAPI command
+ na_ontap_zapit:
+ <<: *login
+ vserver: trident_svm
+ zapi:
+ active-directory-account-create:
+ account-name: testaccount
+ admin-username: testuser
+ admin-password: testpass
+ domain: testdomain
+ organizational-unit: testou
+ register: output
+ ignore_errors: True
+ - debug: var=output
+
+"""
+
+RETURN = """
+response:
+ description:
+ - If successful, a json dictionary representing the data returned by the ZAPI.
+ - If the ZAPI was executed but failed, an empty dictionary.
+ - Not present if the ZAPI call cannot be performed.
+ returned: On success
+ type: dict
+status:
+ description:
+ - If the ZAPI was executed but failed, the status set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+errno:
+ description:
+ - If the ZAPI was executed but failed, the error code set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+reason:
+ description:
+ - If the ZAPI was executed but failed, the error reason set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPZapi(object):
+ ''' calls a ZAPI command '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ zapi=dict(required=True, type='dict'),
+ vserver=dict(required=False, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+ parameters = self.module.params
+ # set up state variables
+ self.zapi = parameters['zapi']
+ self.vserver = parameters['vserver']
+
+ if not HAS_JSON:
+ self.module.fail_json(msg="the python json module is required")
+ if not HAS_XMLTODICT:
+ self.module.fail_json(msg="the python xmltodict module is required")
+ if not HAS_NETAPP_LIB:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+
+ if self.vserver is not None:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def asup_log_for_cserver(self, event_name):
+ """
+ Fetch admin vserver for the given cluster
+ Create and Autosupport log event with the given module name
+ :param event_name: Name of the event log
+ :return: None
+ """
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ try:
+ netapp_utils.ems_log_event(event_name, cserver)
+ except netapp_utils.zapi.NaApiError:
+ pass
+
+ def jsonify_and_parse_output(self, xml_data):
+ ''' convert from XML to JSON
+        extract status and error fields if present
+ '''
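+        # Illustration (hypothetical values): a failed reply such as
+        #     <results status="failed" errno="13001" reason="entry not found"/>
+        # is returned by this method as
+        #     ({}, 'failed', '13001', 'entry not found')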
+ try:
+ as_dict = xmltodict.parse(xml_data.to_string(), xml_attribs=True)
+ except Exception as exc:
+ self.module.fail_json(msg='Error running zapi in xmltodict: %s: %s' %
+ (xml_data.to_string(), str(exc)))
+ try:
+ as_json = json.loads(json.dumps(as_dict))
+ except Exception as exc:
+ self.module.fail_json(msg='Error running zapi in json load/dump: %s: %s' %
+ (as_dict, str(exc)))
+
+ if 'results' not in as_json:
+ self.module.fail_json(msg='Error running zapi, no results field: %s: %s' %
+ (xml_data.to_string(), repr(as_json)))
+
+ # set status, and if applicable errno/reason, and remove attribute fields
+ errno = None
+ reason = None
+ response = as_json.pop('results')
+ status = response.get('@status', 'no_status_attr')
+ if status != 'passed':
+ # collect errno and reason
+ errno = response.get('@errno', None)
+ if errno is None:
+ errno = response.get('errorno', None)
+ if errno is None:
+ errno = 'ESTATUSFAILED'
+ reason = response.get('@reason', None)
+ if reason is None:
+ reason = response.get('reason', None)
+ if reason is None:
+ reason = 'Execution failure with unknown reason.'
+
+ for key in ('@status', '@errno', '@reason', '@xmlns'):
+ try:
+ # remove irrelevant info
+ del response[key]
+ except KeyError:
+ pass
+ return response, status, errno, reason
+
+ def run_zapi(self):
+ ''' calls the ZAPI '''
+ zapi_struct = self.zapi
+ error = None
+ if not isinstance(zapi_struct, dict):
+            error = 'A dictionary entry is expected, eg: system-get-version: '
+ zapi = zapi_struct
+ else:
+ zapi = list(zapi_struct.keys())
+ if len(zapi) != 1:
+ error = 'A single ZAPI can be called at a time'
+ else:
+ zapi = zapi[0]
+
+ # log first, then error out as needed
+ self.ems(zapi)
+ if error:
+ self.module.fail_json(msg='%s, received: %s' % (error, zapi))
+
+ zapi_obj = netapp_utils.zapi.NaElement(zapi)
+ attributes = zapi_struct[zapi]
+ if attributes is not None and attributes != 'None':
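+            # translate_struct builds the nested dict/list structure into child XML elements of the request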
+ zapi_obj.translate_struct(attributes)
+
+ try:
+ output = self.server.invoke_elem(zapi_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error running zapi %s: %s' %
+ (zapi, to_native(error)),
+ exception=traceback.format_exc())
+
+ return self.jsonify_and_parse_output(output)
+
+ def ems(self, zapi):
+ """
+        Log an EMS event for this ZAPI call: directly against the vserver when vserver tunneling is used,
+        otherwise against the cluster admin vserver.
+        :return: None
+ """
+ if self.vserver:
+ try:
+ netapp_utils.ems_log_event("na_ontap_zapi" + str(zapi), self.server)
+            except netapp_utils.zapi.NaApiError:
+ pass
+ else:
+ self.asup_log_for_cserver("na_ontap_zapi: " + str(zapi))
+
+ def apply(self):
+ ''' calls the zapi and returns json output '''
+ response, status, errno, reason = self.run_zapi()
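+        # the module cannot tell whether the ZAPI modified anything, so any successful call is reported as changed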
+ if status == 'passed':
+ self.module.exit_json(changed=True, response=response)
+ msg = 'ZAPI failure: check errno and reason.'
+ self.module.fail_json(changed=False, response=response, status=status, errno=errno, reason=reason, msg=msg)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ zapi = NetAppONTAPZapi()
+ zapi.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE
new file mode 100644
index 00000000..0ad25db4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md
new file mode 100644
index 00000000..bcbfe5cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md
@@ -0,0 +1,127 @@
+na_ontap_cluster_config
+=========
+
+Configure one or more of the following ONTAP settings:
+
+* Licenses
+* Disk Assignments
+* Cluster DNS
+* NTP
+* SNMP
+* MOTD
+* Aggregates
+* Ports
+* Interface Groups
+* VLANs
+* Broadcast Domains
+* Intercluster LIFs
+
+Requirements
+------------
+
+Since this role uses the NetApp ONTAP modules, it requires the netapp-lib Python library as well as Ansible 2.8 or later.
+
+Role Variables
+--------------
+```
+cluster: <short ONTAP name of cluster>
+netapp_hostname: <ONTAP mgmt ip or fqdn>
+netapp_username: <ONTAP admin account>
+netapp_password: <ONTAP admin account password>
+
+# Whether a variable is set (!= None) determines whether its section runs. Each variable takes one or more dictionary entries.
+# Simply omit the sections that you don't want to run. The following would run all sections.
+
+license_codes: AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAAAAA
+
+disks: # currently the disks module assigns all visible disks to a node. If you want to split disks between nodes, that has to be done manually for now
+ - cluster-01
+ - cluster-02
+
+motd: "The login in message you would like displayed when someone ssh's into the system"
+
+dns:
+ - { dns_domains: ansible.local, dns_nameservers: 1.1.1.1 }
+
+ntp:
+ - { server_name: time.nist.gov, version: auto }
+
+snmp:
+ - { community_name: public, access_control: ro }
+
+aggrs:
+ - { name: aggr1, node: cluster-01, disk_count: 26, max_raid: 26 }
+ - { name: aggr2, node: cluster-02, disk_count: 26, max_raid: 26 }
+
+ports: # Ports also accepts the variables 'autonegotiate' and 'flowcontrol', which default to true and none respectively, but can be overridden in your playbook
+ - { node: cluster-01, port: e0c, mtu: 9000 }
+ - { node: cluster-01, port: e0d, mtu: 9000, flowcontrol: none, autonegotiate: false }
+
+
+ifgrps:
+  - { name: a0a, node: cluster-01, ports: "e0a,e0b", mode: multimode, mtu: 9000 }
+  - { name: a0a, node: cluster-02, ports: "e0a,e0b", mode: multimode, mtu: 9000 }
+
+vlans:
+ - { id: 201, node: cluster-01, parent: a0a }
+
+bcasts:
+ - { name: Backup, mtu: 9000, ipspace: default, ports: 'cluster-01:e0c,vsim-02:e0c' }
+
+inters:
+ - { name: intercluster_1, address: 172.32.0.187, netmask: 255.255.255.0, node: cluster-01, port: e0c }
+ - { name: intercluster_2, address: 172.32.0.188, netmask: 255.255.255.0, node: cluster-02, port: e0c }
+```
+Dependencies
+------------
+
+The tasks in this role are dependent on information from the na_ontap_gather_facts module.
+The task for na_ontap_gather_facts cannot be excluded.
+
+Example Playbook
+----------------
+```
+---
+- hosts: localhost
+ collections:
+ - netapp.ontap
+ vars_files:
+ - globals.yml
+ roles:
+ - na_ontap_cluster_config
+ ```
+
+I use a globals file to hold my variables.
+```
+cluster_name: cluster
+
+netapp_hostname: 172.32.0.182
+netapp_username: admin
+netapp_password: netapp123
+
+license_codes: <removed>
+
+aggrs:
+ - { name: aggr1, node: cluster-01, disk_count: 26, max_raid: 26 }
+ - { name: aggr2, node: cluster-02, disk_count: 26, max_raid: 26 }
+
+ifgrps:
+  - { name: a0a, node: cluster-01, ports: "e0a", mode: multimode }
+  - { name: a0a, node: cluster-02, ports: "e0a", mode: multimode }
+  - { name: a0a, node: cluster-01, ports: "e0b", mode: multimode }
+  - { name: a0a, node: cluster-02, ports: "e0b", mode: multimode }
+
+inters:
+ - { name: intercluster_1, address: 172.32.0.187, netmask: 255.255.255.0, node: cluster-01, port: e0c }
+ - { name: intercluster_2, address: 172.32.0.188, netmask: 255.255.255.0, node: cluster-02, port: e0c }
+```
+
+License
+-------
+
+GNU v3
+
+Author Information
+------------------
+NetApp
+http://www.netapp.io \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml
new file mode 100644
index 00000000..976f64f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml
@@ -0,0 +1,25 @@
+---
+netapp_hostname:
+netapp_password:
+netapp_username:
+validate_certs: false
+license_codes:
+disks:
+motd:
+dns:
+ntp:
+snmp:
+aggrs:
+ports:
+ifgrps:
+vlans:
+bcasts:
+inters:
+cluster:
+cluster_name: temp
+autonegotiate: true
+flowcontrol: none
+distribution_function: ip
+ifgrp_mode: multimode_lacp
+bcast_ipspace: Default
+mtu: 9000
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml
new file mode 100644
index 00000000..8a538734
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for na-ontap-cluster-config \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml
new file mode 100644
index 00000000..76068c01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml
@@ -0,0 +1,9 @@
+galaxy_info:
+ author: "NetApp"
+ description: "Role for configuring an ONTAP cluster"
+ company: "NetApp"
+ license: BSD
+ min_ansible_version: 2.8
+ platforms:
+ galaxy_tags: [netapp, ontap]
+ dependencies: []
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml
new file mode 100644
index 00000000..eec783c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml
@@ -0,0 +1,203 @@
+---
+# tasks file for na-ontap-cluster-config
+- name: Setup licenses
+ na_ontap_license:
+ state: present
+ license_codes: "{{ license_codes }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ when: license_codes != None
+- name: Assign Disks
+ na_ontap_disks:
+ node: "{{ item }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ disks }}"
+ when: disks != None
+- name: Set Login Message
+ na_ontap_motd:
+ state: present
+ vserver: "{{ cluster }}"
+ message: "{{ motd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ when: motd != None
+- name: Setup DNS
+ na_ontap_dns:
+ state: present
+ vserver: "{{ cluster }}"
+ domains: "{{ item.dns_domains }}"
+ nameservers: "{{ item.dns_nameservers }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ dns }}"
+ when: dns != None
+- name: Set NTP Server
+ na_ontap_ntp:
+ state: present
+ server_name: "{{ item.server_name }}"
+ version: "{{ item.version }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ ntp }}"
+ when: ntp != None
+- name: Create SNMP community
+ na_ontap_snmp:
+ community_name: "{{ item.community_name }}"
+ access_control: "{{ item.access_control }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ snmp }}"
+ when: snmp != None
+- name: Create Aggregates
+ na_ontap_aggregate:
+ state: present
+ service_state: online
+ name: "{{ item.name }}"
+ nodes: "{{ item.node }}"
+ disk_count: "{{ item.disk_count }}"
+ raid_size: "{{ item.max_raid }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ aggrs }}"
+ when: aggrs != None
+- name: Remove ports from Default broadcast domain
+ na_ontap_broadcast_domain_ports:
+ state: absent
+ broadcast_domain: Default
+ ports: "{{ item.node }}:{{ item.port }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ ports }}"
+ when: ports != None
+
+# Configure flowcontrol and autonegotiate.
+# Skip MTU for now because the interface group has to be configured first.
+- name: Modify Net Port
+ na_ontap_net_port:
+ state: present
+ node: "{{ item.node }}"
+ port: "{{ item.port }}"
+# mtu: "{{ item.mtu }}"
+ autonegotiate_admin: "{{ item.autonegotiate | default(autonegotiate) }}"
+ flowcontrol_admin: "{{ item.flowcontrol | default(flowcontrol) }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ ports }}"
+ when: ports != None
+- name: Create Interface Group
+ na_ontap_net_ifgrp:
+ state: present
+ distribution_function: "{{ distribution_function }}"
+ name: "{{ item.name }}"
+ ports: "{{ item.ports }}"
+ mode: "{{ item.mode | default(ifgrp_mode) }}"
+ node: "{{ item.node }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ ifgrps }}"
+ when: ifgrps != None
+
+# Set MTU - Interface group must be configured first.
+- name: Modify Net Port
+ na_ontap_net_port:
+ state: present
+ node: "{{ item.node }}"
+ port: "{{ item.name }}"
+ mtu: "{{ item.mtu }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ ifgrps }}"
+ when: ifgrps != None
+- name: Create VLAN
+ na_ontap_net_vlan:
+ state: present
+ vlanid: "{{ item.id }}"
+ node: "{{ item.node }}"
+ parent_interface: "{{ item.parent }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vlans }}"
+ when: vlans != None
+- name: Create Broadcast Domain
+ na_ontap_broadcast_domain:
+ state: present
+ broadcast_domain: "{{ item.name }}"
+ mtu: "{{ item.mtu }}"
+ ipspace: "{{ bcast_ipspace }}"
+ ports: "{{ item.ports }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ bcasts }}"
+ when: bcasts != None
+- name: Create Intercluster Lif
+ na_ontap_interface:
+ state: present
+ interface_name: "{{ item.name }}"
+ home_port: "{{ item.port }}"
+ home_node: "{{ item.node }}"
+ role: intercluster
+ admin_status: up
+ failover_policy: local-only
+ is_auto_revert: true
+ address: "{{ item.address }}"
+ netmask: "{{ item.netmask }}"
+ vserver: "{{ cluster }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ inters }}"
+ when: inters != None \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml
new file mode 100644
index 00000000..f6a94f04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - na-ontap-cluster-config \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml
new file mode 100644
index 00000000..5adee01d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for na-ontap-cluster-config \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md
new file mode 100644
index 00000000..1a7f489a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md
@@ -0,0 +1,65 @@
+na_ontap_nas_create
+=========
+
+Create one or more NFS or CIFS exports
+
+Requirements
+------------
+
+Since this role uses the NetApp ONTAP modules, it requires the netapp-lib Python library and Ansible 2.8 or later.
+
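+If netapp-lib is not already present on the control node, one way to install it (a minimal sketch, assuming pip and PyPI are available; adjust to your environment) is:
+```
+- hosts: localhost
+  tasks:
+    - name: Install the netapp-lib dependency on the Ansible control node
+      pip:
+        name: netapp-lib
+        state: present
+```
+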
+Role Variables
+--------------
+```
+cluster: <short ONTAP name of cluster>
+netapp_hostname: <ONTAP mgmt ip or fqdn>
+netapp_username: <ONTAP admin account>
+netapp_password: <ONTAP admin account password>
+
+nas:
+ - { name: nfs_share, protocol: nfs, vserver: nfs_vserver, client: 172.32.0.201, ro: sys, rw: sys, su: sys, aggr: aggr1, size: 10, share: share_name }
+# If you are creating an NFS export, omit the share: key.
+# If you are creating a CIFS share, you may omit the ro, rw, su, and client keys.
+
+```
+Dependencies
+------------
+
+The tasks in this role depend on information gathered by the na_ontap_gather_facts module.
+The na_ontap_gather_facts task cannot be excluded.
+
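+A minimal sketch of gathering facts before the role runs (not part of the role itself; it reuses the connection variables used elsewhere in this README and assumes the default gather subset) might look like:
+```
+- hosts: localhost
+  collections:
+    - netapp.ontap
+  tasks:
+    - name: Gather ONTAP facts required by this role
+      na_ontap_gather_facts:
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+        https: true
+        validate_certs: false
+```
+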
+Example Playbook
+----------------
+```
+---
+- hosts: localhost
+ collections:
+ - netapp.ontap
+ vars_files:
+ - globals.yml
+ roles:
+ - na_ontap_nas_create
+```
+
+I use a globals file to hold my variables.
+```
+cluster_name: cluster
+
+netapp_hostname: 172.32.0.182
+netapp_username: admin
+netapp_password: netapp123
+
+nas:
+ - { name: nfs_share, protocol: nfs, vserver: nfs_vserver, client: 172.32.0.201, ro: sys, rw: sys, su: sys, aggr: aggr1, size: 10 }
+ - { name: cifs_share, protocol: cifs, vserver: cifs_vserver, aggr: aggr1, size: 10, share: cifs_share_1 }
+```
+
+License
+-------
+
+GNU GPL v3
+
+Author Information
+------------------
+NetApp
+http://www.netapp.io
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml
new file mode 100644
index 00000000..cab01812
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for na-ontap-nas-create
+netapp_hostname:
+netapp_password:
+netapp_username:
+validate_certs: false
+nas:
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml
new file mode 100644
index 00000000..7d57a11d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for na-ontap-nas-create \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml
new file mode 100644
index 00000000..dcf84686
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml
@@ -0,0 +1,9 @@
+galaxy_info:
+ author: "NetApp"
+ description: "Role for creating NFS and CIFS shares"
+ company: "NetApp"
+ license: BSD
+ min_ansible_version: 2.8
+ platforms:
+ galaxy_tags: [netapp, ontap]
+ dependencies: []
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml
new file mode 100644
index 00000000..97596df2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+- name: Create Policy
+ na_ontap_export_policy:
+ state: present
+ name: "{{ item.name }}"
+ vserver: "{{ item.vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ nas }}"
+ when: item.protocol == "nfs"
+- name: Setup rules
+ na_ontap_export_policy_rule:
+ state: present
+ policy_name: "{{ item.name }}"
+ vserver: "{{ item.vserver }}"
+ client_match: "{{ item.client }}"
+ ro_rule: "{{ item.ro }}"
+ rw_rule: "{{ item.rw }}"
+ super_user_security: "{{ item.su }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ nas }}"
+ when: item.protocol == "nfs"
+- name: Create volume
+ na_ontap_volume:
+ state: present
+ name: "{{ item.name }}"
+ aggregate_name: "{{ item.aggr }}"
+ size: "{{ item.size }}"
+ size_unit: gb
+ policy: "{{ 'default' if item.protocol.lower() == 'cifs' else item.name }}"
+ junction_path: "/{{ item.name }}"
+ space_guarantee: "none"
+ vserver: "{{ item.vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ nas }}"
+- name: Create Share
+ na_ontap_cifs:
+ state: present
+ share_name: "{{ item.share }}"
+ path: "/{{ item.name }}"
+ vserver: "{{ item.vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ nas }}"
+ when: item.share is defined
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml
new file mode 100644
index 00000000..e07ff7e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - na-ontap-nas-create \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml
new file mode 100644
index 00000000..53bcdbb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for na-ontap-nas-create \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md
new file mode 100644
index 00000000..8a7a46b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md
@@ -0,0 +1,67 @@
+na_ontap_san_create
+=========
+
+Create one or more LUNs for iSCSI or FCP.
+
+Requirements
+------------
+
+Since this role uses the NetApp ONTAP modules, it requires the netapp-lib Python library as well as Ansible 2.8 or later.
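+
+A minimal way to satisfy the library requirement is to install netapp-lib on the control node first. The play below is only a sketch using the standard Ansible pip module; adjust it to however you normally manage Python packages.
+```
+---
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Ensure the netapp-lib Python package is present
+      pip:
+        name: netapp-lib
+```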
+
+Role Variables
+--------------
+```
+cluster: <short ONTAP name of cluster>
+netapp_hostname: <ONTAP mgmt ip or fqdn>
+netapp_username: <ONTAP admin account>
+netapp_password: <ONTAP admin account password>
+
+igroups:
+ - { name: igroup1, vserver: san_vserver, group_type: iscsi, ostype: linux, initiator: "<iqn or wwpn>" } # the quotes for iqn/wwpn are necessary because of the : in them.
+luns:
+ - { name: lun1, vol_name: lun_vol, aggr: aggr1, vserver: san_vserver, size: 10, ostype: linux, space_reserve: false, igroup: igroup1 }
+
+```
+Dependencies
+------------
+
+The tasks in this role depend on information from the na_ontap_gather_facts module.
+The na_ontap_gather_facts task cannot be excluded.
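+
+As a sketch (assuming the same connection variables this role already uses from globals.yml), the facts task can simply run in the same play before the role:
+```
+---
+- hosts: localhost
+  collections:
+    - netapp.ontap
+  vars_files:
+    - globals.yml
+  tasks:
+    - name: Gather ONTAP facts used by this role
+      na_ontap_gather_facts:
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+        https: true
+        validate_certs: false
+```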
+
+Example Playbook
+----------------
+```
+---
+- hosts: localhost
+  collections:
+    - netapp.ontap
+  vars_files:
+    - globals.yml
+  roles:
+    - na_ontap_san_create
+```
+
+I use a globals file to hold my variables.
+```
+cluster_name: cluster
+
+netapp_hostname: 172.32.0.182
+netapp_username: admin
+netapp_password: netapp123
+
+igroups:
+ - { name: igroup1, vserver: san_vserver, group_type: iscsi, ostype: linux, initiator: "iqn.1994-05.com.redhat:2750d14d868d" }
+
+luns:
+ - { name: lun1, vol_name: lun_vol, vserver: san_vserver, size: 10, ostype: linux, space_reserve: false, igroup: igroup1 }
+```
+
+License
+-------
+
+GNU v3
+
+Author Information
+------------------
+NetApp
+http://www.netapp.io
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml
new file mode 100644
index 00000000..60fbcbf5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# defaults file for na-ontap-san-create
+netapp_hostname:
+netapp_password:
+netapp_username:
+validate_certs: false
+igroups:
+luns:
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml
new file mode 100644
index 00000000..d2c5a8d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for na-ontap-san-create
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml
new file mode 100644
index 00000000..cf44f8d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml
@@ -0,0 +1,9 @@
+galaxy_info:
+ author: "NetApp"
+ description: "Role for creating LUNs"
+ company: "NetApp"
+ license: BSD
+ min_ansible_version: 2.8
+ platforms:
+ galaxy_tags: [netapp, ontap]
+ dependencies: []
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml
new file mode 100644
index 00000000..23f50ba4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml
@@ -0,0 +1,65 @@
+---
+- name: Create iGroup
+  na_ontap_igroup:
+    state: present
+    name: "{{ item.name }}"
+    vserver: "{{ item.vserver }}"
+    initiator_group_type: "{{ item.group_type }}"
+    ostype: "{{ item.ostype }}"
+    initiator: "{{ item.initiator }}"
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"
+    https: true
+    validate_certs: "{{ validate_certs }}"
+  with_items:
+    "{{ igroups }}"
+  when: igroups != None
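+# Create the backing volume roughly 5% larger than the requested LUN size (rounded up to whole gigabytes) so the LUN fits inside it.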
+- name: Create volume
+  na_ontap_volume:
+    state: present
+    name: "{{ item.vol_name }}"
+    aggregate_name: "{{ item.aggr }}"
+    size: "{{ (item.size | int * 1.05) | round(0, 'ceil') | int }}"
+    size_unit: gb
+    space_guarantee: none
+    vserver: "{{ item.vserver }}"
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"
+    https: true
+    validate_certs: "{{ validate_certs }}"
+  with_items:
+    "{{ luns }}"
+- name: Lun Create
+  na_ontap_lun:
+    state: present
+    name: "{{ item.name }}"
+    flexvol_name: "{{ item.vol_name }}"
+    vserver: "{{ item.vserver }}"
+    size: "{{ item.size }}"
+    size_unit: gb
+    ostype: "{{ item.ostype }}"
+    space_reserve: "{{ item.space_reserve }}"
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"
+    https: true
+    validate_certs: "{{ validate_certs }}"
+  with_items:
+    "{{ luns }}"
+  when: luns != None
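+# Map each LUN to its igroup using the /vol/<volume>/<lun> path built from the item values.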
+- name: Create LUN mapping
+  na_ontap_lun_map:
+    state: present
+    initiator_group_name: "{{ item.igroup }}"
+    path: "/vol/{{ item.vol_name }}/{{ item.name }}"
+    vserver: "{{ item.vserver }}"
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"
+    https: true
+    validate_certs: "{{ validate_certs }}"
+  with_items:
+    "{{ luns }}"
+  when: luns != None
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml
new file mode 100644
index 00000000..c308417e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - na_ontap_san_create
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml
new file mode 100644
index 00000000..53bcdbb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for na-ontap-san-create
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md
new file mode 100644
index 00000000..9fd21481
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md
@@ -0,0 +1,70 @@
+na_ontap_snapmirror_create
+=========
+
+Create or verify the following:
+
+- Cluster peer
+- Vserver peer
+- Destination volume
+- SnapMirror relationship
+
+Requirements
+------------
+
+Since this role uses the NetApp ONTAP modules, it requires the netapp-lib Python library as well as Ansible 2.8 or later.
+
+Role Variables
+--------------
+```
+src_ontap: # IP or FQDN of the source ONTAP cluster
+src_name: # Shortname of the source cluster
+src_lif: # IP address of a source Intercluster LIF
+src_vserver: # Name of source Vserver
+src_volume: # Name of source FlexVol
+dst_ontap: # IP or FQDN of the destination ONTAP cluster
+dst_name: # Shortname of the destination cluster
+dst_lif: # IP address of a destination Intercluster LIF
+dst_aggr: # Aggregate to create destination FlexVol on
+dst_vserver: # Name of destination Vserver
+username: # Admin username of both clusters
+password: # Password for Admin username
+```
+Dependencies
+------------
+
+The tasks in this role depend on information from the na_ontap_gather_facts module.
+The na_ontap_gather_facts task cannot be excluded.
+
+Example Playbook
+----------------
+```
+---
+- hosts: localhost
+  name: Snapmirror Create
+  gather_facts: false
+  vars:
+    src_ontap: 172.32.0.182
+    src_name: vsim
+    src_lif: 172.32.0.187
+    src_vserver: Marketing
+    src_volume: Marketing_Presentation
+    dst_ontap: 172.32.0.192
+    dst_name: cvo
+    dst_lif: 172.32.0.194
+    dst_aggr: aggr1
+    dst_vserver: backup_vserver
+    username: admin
+    password: netapp123
+  roles:
+    - na_ontap_snapmirror_create
+```
+
+License
+-------
+
+GNU v3
+
+Author Information
+------------------
+NetApp
+http://www.netapp.io
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml
new file mode 100644
index 00000000..f6321a9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# defaults file for na_ontap_snapmirror_create
+src_ontap:
+src_name:
+src_lif:
+src_vserver:
+src_volume:
+dst_ontap:
+dst_lif:
+dst_vserver:
+dst_volume: "{{ src_volume }}_dest"
+dst_aggr:
+passphrase: IamAp483p45a83
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml
new file mode 100644
index 00000000..22ad49bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for na_ontap_snapmirror_create
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml
new file mode 100644
index 00000000..22aa1327
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml
@@ -0,0 +1,9 @@
+galaxy_info:
+ author: "NetApp"
+ description: "Create SnapMirror relationship"
+ company: "NetApp"
+ license: BSD
+ min_ansible_version: 2.8
+ platforms:
+ galaxy_tags: [netapp, ontap]
+ dependencies: []
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml
new file mode 100644
index 00000000..8af90d91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+# tasks file for na_ontap_snapmirror_create
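+# Cluster peering is initiated from the source cluster; dest_hostname lets the module authorize the destination side with the same passphrase.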
+- name: Create cluster peer
+  na_ontap_cluster_peer:
+    state: present
+    source_intercluster_lifs: "{{ src_lif }}"
+    dest_intercluster_lifs: "{{ dst_lif }}"
+    passphrase: "{{ passphrase }}"
+    hostname: "{{ src_ontap }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    dest_hostname: "{{ dst_ontap }}"
+    https: true
+    validate_certs: false
+- name: Source vserver peer create
+  na_ontap_vserver_peer:
+    state: present
+    peer_vserver: "{{ dst_vserver }}"
+    peer_cluster: "{{ dst_name }}"
+    vserver: "{{ src_vserver }}"
+    applications: snapmirror
+    hostname: "{{ src_ontap }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    dest_hostname: "{{ dst_ontap }}"
+    https: true
+    validate_certs: false
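+# The destination FlexVol is created as a small DP (data protection) volume, the type SnapMirror requires for a destination.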
+- name: Validate destination FlexVol
+  na_ontap_volume:
+    state: present
+    name: "{{ dst_volume }}"
+    is_infinite: False
+    aggregate_name: "{{ dst_aggr }}"
+    size: 20
+    size_unit: mb
+    type: DP
+    #junction_path: "/{{ dst_volume }}"
+    vserver: "{{ dst_vserver }}"
+    hostname: "{{ dst_ontap }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    https: true
+    validate_certs: false
+- name: Create SnapMirror
+  na_ontap_snapmirror:
+    state: present
+    source_volume: "{{ src_volume }}"
+    destination_volume: "{{ dst_volume }}"
+    source_vserver: "{{ src_vserver }}"
+    destination_vserver: "{{ dst_vserver }}"
+    hostname: "{{ dst_ontap }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    https: true
+    validate_certs: false
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml
new file mode 100644
index 00000000..91b29dfe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - na_ontap_snapmirror_create
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml
new file mode 100644
index 00000000..03f2d3af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for na_ontap_snapmirror_create
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md
new file mode 100644
index 00000000..7542fc20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md
@@ -0,0 +1,105 @@
+na_ontap_vserver_create
+=========
+
+Create one or more Vservers.
+
+Creates a Vserver with the specified protocol(s), and joins it to a Windows domain when AD credentials are provided.
+Modifies the default export policy rule for the NFS protocol to 0.0.0.0/0 read-only (ro) so that NFS clients can connect.
+
+Requirements
+------------
+
+Since this role uses the NetApp ONTAP modules, it requires the netapp-lib Python library as well as Ansible 2.8 or later.
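+
+A minimal installation sketch (the file name `requirements.yml` and the commands in the comments are illustrative, not shipped with this role):
+```
+# requirements.yml (install with: ansible-galaxy collection install -r requirements.yml)
+collections:
+  - name: netapp.ontap
+# netapp-lib is a separate Python dependency: pip install netapp-lib
+```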
+
+Role Variables
+--------------
+```
+cluster: <short ONTAP name of cluster>
+netapp_hostname: <ONTAP mgmt ip or fqdn>
+netapp_username: <ONTAP admin account>
+netapp_password: <ONTAP admin account password>
+
+ontap_version: <version minor code> # OPTIONAL. Defaults to ONTAP version minor code 140 (9.4). If running against ONTAP 9.3 or below, add this variable and set it to 120.
+
+#Whether each section runs is determined by whether its variable is set (!= None) or left empty (== None). Each variable takes one or more dictionary entries. Simply omit sections
+#that you don't want to run. The following would run all sections.
+
+vservers: # Vservers to create
+ - { name: nfs_vserver, aggr: aggr1, protocol: nfs, aggr_list: "aggr1,aggr2" } # aggr_list is optional. If not specified all aggregates will be added to the allowed list.
+ - { name: cifs_vserver, aggr: aggr1, protocol: cifs }
+
+vserver_dns: # DNS at the Vserver level.
+ - { vserver: cifs_vserver, dns_domains: lab.local, dns_nameservers: 172.32.0.40 }
+
+lifs: # interfaces for the Vservers being created
+ - { name: nfs_vserver_data_lif, vserver: nfs_vserver, node: cluster-01, port: e0c, protocol: nfs, address: 172.32.0.193, netmask: 255.255.255.0 }
+ - { name: cifs_vserver_data_lif, vserver: cifs_vserver, node: cluster-01, port: e0c, protocol: nfs, address: 172.32.0.194, netmask: 255.255.255.0 }
+
+gateway: # To configure the default gateway for the Vserver.
+ - { vserver: nfs_vserver, destination: 0.0.0.0/0, gateway: 172.32.0.1 }
+
+cifs: # Vservers to join to an AD Domain
+ - { vserver: cifs_vserver, cifs_server_name: netapp1, domain: ansible.local, force: true }
+
+fcp: # sets FCP ports as Target
+ - { adapter_name: 0e, node_name: cluster-01 }
+```
+Dependencies
+------------
+
+The tasks in this role depend on information from the na_ontap_gather_facts module.
+The na_ontap_gather_facts task cannot be excluded.
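+
+One way to satisfy this, sketched here as an assumption (the `pre_tasks` placement and the exact facts arguments are illustrative, not part of this role):
+```
+---
+- hosts: localhost
+  collections:
+    - netapp.ontap
+  pre_tasks:
+    - name: Gather ONTAP facts before the role tasks run (illustrative)
+      na_ontap_gather_facts:
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+        https: true
+        validate_certs: false
+  roles:
+    - na_ontap_vserver_create
+```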
+
+Example Playbook
+----------------
+```
+---
+- hosts: localhost
+ collections:
+ - netapp.ontap
+ vars_prompt:
+ - name: admin_user_name
+ prompt: domain admin (enter if skipped)
+ - name: admin_password
+ prompt: domain admin password (enter if skipped)
+ vars_files:
+ - globals.yml
+ roles:
+ - na_ontap_vserver_create
+```
+I use a globals file to hold my variables.
+```
+---
+# globals.yml
+cluster_name: cluster
+
+netapp_hostname: 172.32.0.182
+netapp_username: admin
+netapp_password: netapp123
+
+vservers:
+ - { name: nfs_vserver, aggr: aggr1, protocol: NFS }
+ - { name: cifs_vserver, aggr: aggr1, protocol: cifs }
+ - { name: nas_vserver, aggr: aggr1, protocol: 'cifs,nfs' }
+
+lifs:
+ - { name: nfs_vserver_data_lif, vserver: nfs_vserver, node: vsim-01, port: e0c, protocol: nfs, address: 172.32.0.183, netmask: 255.255.255.0 }
+ - { name: cifs_vserver_data_lif, vserver: cifs_vserver, node: vsim-01, port: e0c, protocol: nfs, address: 172.32.0.184, netmask: 255.255.255.0 }
+ - { name: nas_vserver_data_lif, vserver: nas_vserver, node: vsim-02, port: e0c, protocol: nfs, address: 172.32.0.185, netmask: 255.255.255.0 }
+
+vserver_dns:
+ - { vserver: cifs_vserver, dns_domains: lab.local, dns_nameservers: 172.32.0.40 }
+
+cifs:
+ - { vserver: cifs_vserver, cifs_server_name: netapp1, domain: openstack.local, force: true }
+```
+
+License
+-------
+
+GNU v3
+
+Author Information
+------------------
+NetApp
+http://www.netapp.io
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml
new file mode 100644
index 00000000..0c5b968a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# defaults file for na-ontap-vserver-create
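+# Variables left empty (None) cause the corresponding section in tasks/main.yml to be skipped.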
+netapp_hostname:
+netapp_username:
+netapp_password:
+validate_certs: false
+vservers:
+vserver_dns:
+lifs:
+cifs:
+nfs:
+fcp:
+gateway:
+ontap_version: 140
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml
new file mode 100644
index 00000000..e826ee3e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for na-ontap-vserver-create \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml
new file mode 100644
index 00000000..ddc4ca71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml
@@ -0,0 +1,8 @@
+galaxy_info:
+ author: NetApp
+ description: "Create one or more SVMs"
+ company: NetApp
+ license: BSD
+ min_ansible_version: 2.8
+ galaxy_tags: [netapp, ontap]
+ dependencies: []
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml
new file mode 100644
index 00000000..031c65ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml
@@ -0,0 +1,187 @@
+---
+- name: Create Vserver
+ na_ontap_svm:
+ state: present
+ name: "{{ item.name }}"
+ root_volume: "{{ item.name }}_root"
+ root_volume_aggregate: "{{ item.aggr }}"
+ root_volume_security_style: "{{ 'ntfs' if item.protocol.lower() is search('cifs') else 'unix' }}"
+ aggr_list: "{{ '*' if item.aggr_list is not defined else item.aggr_list }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vservers }}"
+ when: vservers != None
+- name: Setup FCP
+ na_ontap_fcp:
+ state: present
+ service_state: started
+ vserver: "{{ item.name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vservers }}"
+ when: item.protocol.lower() is search("fcp")
+- name: Setup iSCSI
+ na_ontap_iscsi:
+ state: present
+ service_state: started
+ vserver: "{{ item.name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vservers }}"
+ when: item.protocol.lower() is search("iscsi")
+- name: Modify adapter
+ na_ontap_ucadapter:
+ state: present
+ adapter_name: "{{ item.adapter_name }}"
+ node_name: "{{ item.node_name }}"
+ mode: fc
+ type: target
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ fcp }}"
+ when: fcp != None
+- name: Create Interface
+ na_ontap_interface:
+ state: present
+ interface_name: "{{ item.name }}"
+ home_port: "{{ item.port }}"
+ home_node: "{{ item.node }}"
+ role: data
+ protocols: "{{ item.protocol }}"
+ admin_status: up
+ failover_policy: "{{ omit if item.protocol.lower() is search('iscsi') else 'system-defined' }}"
+ firewall_policy: data
+ is_auto_revert: "{{ 'true' if item.protocol.lower() is not search('iscsi') else omit }}"
+ address: "{{ item.address }}"
+ netmask: "{{ item.netmask }}"
+ vserver: "{{ item.vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ lifs }}"
+ when: lifs != None
+- name: Add default route
+ na_ontap_net_routes:
+ state: present
+ vserver: "{{ item.vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ destination: "{{ item.destination }}"
+ gateway: "{{ item.gateway }}"
+ metric: 30
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ gateway }}"
+ when: gateway != None
+- name: Create DNS
+ na_ontap_dns:
+ state: present
+ vserver: "{{ item.vserver }}"
+ domains: "{{ item.dns_domains }}"
+ nameservers: "{{ item.dns_nameservers }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vserver_dns }}"
+ when: vserver_dns != None
+- name: Create CIFS Server
+ na_ontap_cifs_server:
+ state: present
+ vserver: "{{ item.vserver }}"
+ domain: "{{ item.domain }}"
+ cifs_server_name: "{{ item.cifs_server_name }}"
+ force: "{{ 'false' if item.force is not defined else item.force }}"
+ admin_password: "{{ admin_password }}"
+ admin_user_name: "{{ admin_user_name }}"
+ ou: "{{ item.ou | default(omit) }}"
+ service_state: started
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ cifs }}"
+ when: cifs != None
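+# Two NFS server task variants follow: the first also sets the tcp/udp options and runs
+# when ontap_version >= 130; the second omits those options for older ONTAP versions.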
+- name: Create NFS Server
+ na_ontap_nfs:
+ state: present
+ service_state: started
+ vserver: "{{ item.name }}"
+ nfsv3: enabled
+ nfsv4: disabled
+ nfsv41: disabled
+ tcp: enabled
+ udp: enabled
+ vstorage_state: disabled
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vservers }}"
+ when:
+ - item.protocol.lower() is search("nfs")
+ - ontap_version >= 130
+- name: Create NFS Server
+ na_ontap_nfs:
+ state: present
+ service_state: started
+ vserver: "{{ item.name }}"
+ nfsv3: enabled
+ nfsv4: disabled
+ nfsv41: disabled
+ vstorage_state: disabled
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vservers }}"
+ when:
+ - item.protocol.lower() is search("nfs")
+ - ontap_version < 130
+- name: Setup default NFS rule
+ na_ontap_export_policy_rule:
+ state: present
+ policy_name: default
+ vserver: "{{ item.name }}"
+ client_match: 0.0.0.0/0
+ ro_rule: any
+ rw_rule: none
+ protocol: any
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+ with_items:
+ "{{ vservers }}"
+ when:
+ item.protocol.lower() is search("nfs")
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml
new file mode 100644
index 00000000..072fa17b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - na-ontap-vserver-create \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml
new file mode 100644
index 00000000..e90c60d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for na-ontap-vserver-create \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..e686ab44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,6 @@
+plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name
+plugins/modules/na_ontap_info.py validate-modules:parameter-state-invalid-choice
+plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name
+plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name
+plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid
+plugins/modules/na_ontap_rest_info.py validate-modules:parameter-state-invalid-choice
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..5c626a03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt
@@ -0,0 +1 @@
+plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..f60ee678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+ # If there wasn't an extra newline by itself, then the file being
+ # emulated doesn't have a newline to end the last line remove the
+ # newline that our naive format() added
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+    `read_data` is a string for the `read`, `readline`, and `readlines` methods
+    of the file handle to return. This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp.py
new file mode 100644
index 00000000..be76d435
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp.py
@@ -0,0 +1,468 @@
+# Copyright (c) 2018 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for module_utils netapp.py '''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os.path
+import tempfile
+
+import pytest
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import COLLECTION_VERSION
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip("skipping as missing required netapp_lib")
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
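+# Canned responses used as side_effect values for the patched send_request calls below.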
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': ({}, None),
+ 'end_of_sequence': (None, "Unexpected call to send_request"),
+ 'generic_error': (None, "Expected error"),
+}
+
+
+class MockONTAPConnection(object):
+ ''' mock a server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'vserver':
+ xml = self.build_vserver_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_info(vserver):
+        ''' build xml data for vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes-list')
+ attributes.add_node_with_children('vserver-info',
+ **{'vserver-name': vserver})
+ xml.add_child_elem(attributes)
+ return xml
+
+
+def test_ems_log_event_version():
+ ''' validate Ansible version is correctly read '''
+ source = 'unittest'
+ server = MockONTAPConnection()
+ netapp_utils.ems_log_event(source, server)
+ xml = server.xml_in
+ version = xml.get_child_content('app-version')
+ if version == ansible_version:
+ assert version == ansible_version
+ else:
+ assert version == COLLECTION_VERSION
+ print("Ansible version: %s" % ansible_version)
+
+
+def test_get_cserver():
+    ''' validate cluster vserver name is correctly retrieved '''
+ svm_name = 'svm1'
+ server = MockONTAPConnection('vserver', svm_name)
+ cserver = netapp_utils.get_cserver(server)
+ assert cserver == svm_name
+
+
+def mock_args(feature_flags=None):
+ args = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ if feature_flags is not None:
+ args.update({'feature_flags': feature_flags})
+ return args
+
+
+def cert_args(feature_flags=None):
+ args = {
+ 'hostname': 'test',
+ 'cert_filepath': 'test_pem.pem',
+ 'key_filepath': 'test_key.key'
+ }
+ if feature_flags is not None:
+ args.update({'feature_flags': feature_flags})
+ return args
+
+
+def create_module(args):
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ set_module_args(args)
+ module = basic.AnsibleModule(argument_spec)
+ return module
+
+
+def create_restapi_object(args):
+ module = create_module(args)
+ module.fail_json = fail_json
+ rest_api = netapp_utils.OntapRestAPI(module)
+ return rest_api
+
+
+def create_ontapzapicx_object(args, feature_flags=None):
+ module_args = dict(args)
+ if feature_flags is not None:
+ module_args['feature_flags'] = feature_flags
+ module = create_module(module_args)
+ module.fail_json = fail_json
+ my_args = dict(args)
+ my_args.update(dict(module=module))
+ zapi_cx = netapp_utils.OntapZAPICx(**my_args)
+ return zapi_cx
+
+
+def test_write_to_file():
+ ''' check error and debug logs can be written to disk '''
+ rest_api = create_restapi_object(mock_args())
+    # logging an error also adds a debug record
+ rest_api.log_error(404, '404 error')
+ print(rest_api.errors)
+ print(rest_api.debug_logs)
+ # logging a debug record only
+ rest_api.log_debug(501, '501 error')
+ print(rest_api.errors)
+ print(rest_api.debug_logs)
+
+ try:
+ tempdir = tempfile.TemporaryDirectory()
+ filepath = os.path.join(tempdir.name, 'log.txt')
+ except AttributeError:
+ # python 2.7 does not support tempfile.TemporaryDirectory
+ # we're taking a small chance that there is a race condition
+ filepath = '/tmp/deleteme354.txt'
+ rest_api.write_debug_log_to_file(filepath=filepath, append=False)
+ with open(filepath, 'r') as log:
+ lines = log.readlines()
+ assert len(lines) == 4
+ assert lines[0].strip() == 'Debug: 404'
+ assert lines[2].strip() == 'Debug: 501'
+
+ # Idempotent, as append is False
+ rest_api.write_debug_log_to_file(filepath=filepath, append=False)
+ with open(filepath, 'r') as log:
+ lines = log.readlines()
+ assert len(lines) == 4
+ assert lines[0].strip() == 'Debug: 404'
+ assert lines[2].strip() == 'Debug: 501'
+
+ # Duplication, as append is True
+ rest_api.write_debug_log_to_file(filepath=filepath, append=True)
+ with open(filepath, 'r') as log:
+ lines = log.readlines()
+ assert len(lines) == 8
+ assert lines[0].strip() == 'Debug: 404'
+ assert lines[2].strip() == 'Debug: 501'
+ assert lines[4].strip() == 'Debug: 404'
+ assert lines[6].strip() == 'Debug: 501'
+
+ rest_api.write_errors_to_file(filepath=filepath, append=False)
+ with open(filepath, 'r') as log:
+ lines = log.readlines()
+ assert len(lines) == 1
+ assert lines[0].strip() == 'Error: 404 error'
+
+ # Idempotent, as append is False
+ rest_api.write_errors_to_file(filepath=filepath, append=False)
+ with open(filepath, 'r') as log:
+ lines = log.readlines()
+ assert len(lines) == 1
+ assert lines[0].strip() == 'Error: 404 error'
+
+ # Duplication, as append is True
+ rest_api.write_errors_to_file(filepath=filepath, append=True)
+ with open(filepath, 'r') as log:
+ lines = log.readlines()
+ assert len(lines) == 2
+ assert lines[0].strip() == 'Error: 404 error'
+ assert lines[1].strip() == 'Error: 404 error'
+
+
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_is_rest_true(mock_request):
+ ''' is_rest is expected to return True '''
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ ]
+ rest_api = create_restapi_object(mock_args())
+ is_rest = rest_api.is_rest()
+ print(rest_api.errors)
+ print(rest_api.debug_logs)
+ assert is_rest
+
+
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_is_rest_false(mock_request):
+ ''' is_rest is expected to return False '''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ ]
+ rest_api = create_restapi_object(mock_args())
+ is_rest = rest_api.is_rest()
+ print(rest_api.errors)
+ print(rest_api.debug_logs)
+ assert not is_rest
+ assert rest_api.errors[0] == SRR['is_zapi'][2]
+ assert rest_api.debug_logs[0][0] == SRR['is_zapi'][0] # status_code
+ assert rest_api.debug_logs[0][1] == SRR['is_zapi'][2] # error
+
+
+def test_has_feature_success_default():
+ ''' existing feature_flag with default '''
+ flag = 'deprecation_warning'
+ module = create_module(mock_args())
+ value = netapp_utils.has_feature(module, flag)
+ assert value
+
+
+def test_has_feature_success_user_true():
+ ''' existing feature_flag with value set to True '''
+ flag = 'user_deprecation_warning'
+ args = dict(mock_args({flag: True}))
+ module = create_module(args)
+ value = netapp_utils.has_feature(module, flag)
+ assert value
+
+
+def test_has_feature_success_user_false():
+ ''' existing feature_flag with value set to False '''
+ flag = 'user_deprecation_warning'
+ args = dict(mock_args({flag: False}))
+ print(args)
+ module = create_module(args)
+ value = netapp_utils.has_feature(module, flag)
+ assert not value
+
+
+def test_has_feature_invalid_key():
+ ''' existing feature_flag with unknown key '''
+ flag = 'deprecation_warning_bad_key'
+ module = create_module(mock_args())
+    # replace Ansible fail method with ours
+ module.fail_json = fail_json
+ with pytest.raises(AnsibleFailJson) as exc:
+ netapp_utils.has_feature(module, flag)
+ msg = 'Internal error: unexpected feature flag: %s' % flag
+ assert exc.value.args[0]['msg'] == msg
+
+
+def test_fail_has_username_password_and_cert():
+ ''' failure case in auth_method '''
+ args = mock_args()
+ args.update(dict(cert_filepath='dummy'))
+ with pytest.raises(AnsibleFailJson) as exc:
+ create_restapi_object(args)
+ msg = 'Error: cannot have both basic authentication (username/password) and certificate authentication (cert/key files)'
+ assert exc.value.args[0]['msg'] == msg
+
+
+def test_fail_has_username_password_and_key():
+ ''' failure case in auth_method '''
+ args = mock_args()
+ args.update(dict(key_filepath='dummy'))
+ with pytest.raises(AnsibleFailJson) as exc:
+ create_restapi_object(args)
+ msg = 'Error: cannot have both basic authentication (username/password) and certificate authentication (cert/key files)'
+ assert exc.value.args[0]['msg'] == msg
+
+
+def test_fail_has_username_and_cert():
+ ''' failure case in auth_method '''
+ args = mock_args()
+ args.update(dict(cert_filepath='dummy'))
+ del args['password']
+ with pytest.raises(AnsibleFailJson) as exc:
+ create_restapi_object(args)
+ msg = 'Error: username and password have to be provided together and cannot be used with cert or key files'
+ assert exc.value.args[0]['msg'] == msg
+
+
+def test_fail_has_password_and_cert():
+ ''' failure case in auth_method '''
+ args = mock_args()
+ args.update(dict(cert_filepath='dummy'))
+ del args['username']
+ with pytest.raises(AnsibleFailJson) as exc:
+ create_restapi_object(args)
+ msg = 'Error: username and password have to be provided together and cannot be used with cert or key files'
+ assert exc.value.args[0]['msg'] == msg
+
+
+def test_has_username_password():
+ ''' auth_method reports expected value '''
+ args = mock_args()
+ rest_api = create_restapi_object(args)
+ assert rest_api.auth_method == 'speedy_basic_auth'
+
+
+def test_has_cert_no_key():
+ ''' auth_method reports expected value '''
+ args = cert_args()
+ del args['key_filepath']
+ rest_api = create_restapi_object(args)
+ assert rest_api.auth_method == 'single_cert'
+
+
+def test_has_cert_and_key():
+ ''' auth_method reports expected value '''
+ args = cert_args()
+ rest_api = create_restapi_object(args)
+ assert rest_api.auth_method == 'cert_key'
+
+
+def test_certificate_method_zapi():
+ ''' should fail when trying to read the certificate file '''
+ args = cert_args()
+ zapi_cx = create_ontapzapicx_object(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ zapi_cx._create_certificate_auth_handler()
+ msg1 = 'Cannot load SSL certificate, check files exist.'
+    # for python 2.6 :(
+ msg2 = 'SSL certificate authentication requires python 2.7 or later.'
+ assert exc.value.args[0]['msg'].startswith((msg1, msg2))
+
+
+def test_classify_zapi_exception_cluster_only():
+ ''' verify output matches expectations '''
+ code = 13005
+ message = 'Unable to find API: diagnosis-alert-get-iter on data vserver trident_svm'
+ zapi_exception = netapp_utils.zapi.NaApiError(code, message)
+ kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception)
+ assert kind == 'missing_vserver_api_error'
+ assert new_message.endswith("%d:%s" % (code, message))
+
+
+def test_classify_zapi_exception_rpc_error():
+ ''' verify output matches expectations '''
+ code = 13001
+ message = "RPC: Couldn't make connection [from mgwd on node \"laurentn-vsim1\" (VSID: -1) to mgwd at 172.32.78.223]"
+ error_message = 'NetApp API failed. Reason - %d:%s' % (code, message)
+ zapi_exception = netapp_utils.zapi.NaApiError(code, message)
+ kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception)
+ assert kind == 'rpc_error'
+ assert new_message == error_message
+
+
+def test_classify_zapi_exception_other_error():
+ ''' verify output matches expectations '''
+ code = 13008
+ message = 'whatever'
+ error_message = 'NetApp API failed. Reason - %d:%s' % (code, message)
+ zapi_exception = netapp_utils.zapi.NaApiError(code, message)
+ kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception)
+ assert kind == 'other_error'
+ assert new_message == error_message
+
+
+def test_zapi_parse_response_sanitized():
+ ''' should not fail when trying to read invalid XML characters (\x08) '''
+ args = mock_args()
+ zapi_cx = create_ontapzapicx_object(args)
+ response = b"<?xml version='1.0' encoding='UTF-8' ?>\n<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>\n"
+ response += b"<netapp version='1.180' xmlns='http://www.netapp.com/filer/admin'>\n<results status=\"passed\">"
+ response += b"<cli-output> (cluster log-forwarding create)\n\n"
+ response += b"Testing network connectivity to the destination host 10.10.10.10. \x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\n\n"
+ response += b"Error: command failed: Cannot contact destination host (10.10.10.10) from node\n"
+ response += b" &quot;laurentn-vsim1&quot;. Verify connectivity to desired host or skip the\n"
+ response += b" connectivity check with the &quot;-force&quot; parameter.</cli-output>"
+ response += b"<cli-result-value>0</cli-result-value></results></netapp>\n"
+ # Manually extract cli-output contents
+ cli_output = response.split(b'<cli-output>')[1]
+ cli_output = cli_output.split(b'</cli-output>')[0]
+ cli_output = cli_output.replace(b'&quot;', b'"')
+ # the XML parser would choke on \x08, zapi_cx._parse_response replaces them with '.'
+ cli_output = cli_output.replace(b'\x08', b'.')
+ # Use xml parser to extract cli-output contents
+ xml = zapi_cx._parse_response(response)
+ results = xml.get_child_by_name('results')
+ new_cli_output = results.get_child_content('cli-output')
+ assert cli_output.decode() == new_cli_output
+
+
+def test_zapi_parse_response_unsanitized():
+ ''' should fail when trying to read invalid XML characters (\x08) '''
+ args = mock_args()
+ # use feature_flags to disable sanitization
+ zapi_cx = create_ontapzapicx_object(args, dict(sanitize_xml=False))
+ response = b"<?xml version='1.0' encoding='UTF-8' ?>\n<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>\n"
+ response += b"<netapp version='1.180' xmlns='http://www.netapp.com/filer/admin'>\n<results status=\"passed\">"
+ response += b"<cli-output> (cluster log-forwarding create)\n\n"
+ response += b"Testing network connectivity to the destination host 10.10.10.10. \x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\n\n"
+ response += b"Error: command failed: Cannot contact destination host (10.10.10.10) from node\n"
+ response += b" &quot;laurentn-vsim1&quot;. Verify connectivity to desired host or skip the\n"
+ response += b" connectivity check with the &quot;-force&quot; parameter.</cli-output>"
+ response += b"<cli-result-value>0</cli-result-value></results></netapp>\n"
+ with pytest.raises(netapp_utils.zapi.etree.XMLSyntaxError) as exc:
+ zapi_cx._parse_response(response)
+ msg = 'PCDATA invalid Char value 8'
+ assert exc.value.msg.startswith(msg)
+
+
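+# The next three tests exercise what appears to be the 'classic_basic_authorization'
+# feature flag: when it is unset or False, setup_na_ontap_zapi() seems to return the
+# OntapZAPICx subclass, which injects its own base64 Authorization header; when True,
+# the stock connection is used and _create_request adds no Authorization header.
+# (Summary inferred from the assertions below, not from the module source.)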
+def test_zapi_cx_add_auth_header():
+ ''' should add header '''
+ args = mock_args()
+ module = create_module(args)
+ zapi_cx = netapp_utils.setup_na_ontap_zapi(module)
+ assert isinstance(zapi_cx, netapp_utils.OntapZAPICx)
+ assert zapi_cx.base64_creds is not None
+ request, dummy = zapi_cx._create_request(netapp_utils.zapi.NaElement('dummy_tag'))
+ assert "Authorization" in [x[0] for x in request.header_items()]
+
+
+def test_zapi_cx_add_auth_header_explicit():
+ ''' should add header '''
+ args = mock_args()
+ args['feature_flags'] = dict(classic_basic_authorization=False)
+ module = create_module(args)
+ zapi_cx = netapp_utils.setup_na_ontap_zapi(module)
+ assert isinstance(zapi_cx, netapp_utils.OntapZAPICx)
+ assert zapi_cx.base64_creds is not None
+ request, dummy = zapi_cx._create_request(netapp_utils.zapi.NaElement('dummy_tag'))
+ assert "Authorization" in [x[0] for x in request.header_items()]
+
+
+def test_zapi_cx_no_auth_header():
+ ''' should not add an Authorization header '''
+ args = mock_args()
+ args['feature_flags'] = dict(classic_basic_authorization=True)
+ module = create_module(args)
+ zapi_cx = netapp_utils.setup_na_ontap_zapi(module)
+ assert not isinstance(zapi_cx, netapp_utils.OntapZAPICx)
+ request, dummy = zapi_cx._create_request(netapp_utils.zapi.NaElement('dummy_tag'))
+ assert "Authorization" not in [x[0] for x in request.header_items()]
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py
new file mode 100644
index 00000000..8b31ed7f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py
@@ -0,0 +1,400 @@
+# Copyright (c) 2018 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for module_utils netapp_module.py '''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule as na_helper
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+class MockModule(object):
+ ''' rough mock for an Ansible module class '''
+ def __init__(self, required_param=None, not_required_param=None, unqualified_param=None):
+ self.argument_spec = dict(
+ required_param=dict(required=True),
+ not_required_param=dict(required=False),
+ unqualified_param=dict(),
+ feature_flags=dict(type='dict')
+ )
+ self.params = dict(
+ required_param=required_param,
+ not_required_param=not_required_param,
+ unqualified_param=unqualified_param,
+ feature_flags=dict(type='dict')
+ )
+
+ def fail_json(self, *args, **kwargs): # pylint: disable=unused-argument
+ """function to simulate fail_json: package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def test_get_cd_action_create(self):
+ ''' validate cd_action for create '''
+ current = None
+ desired = {'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result == 'create'
+
+ def test_get_cd_action_delete(self):
+ ''' validate cd_action for delete '''
+ current = {'state': 'absent'}
+ desired = {'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result == 'delete'
+
+ def test_get_cd_action(self):
+ ''' validate cd_action for returning None '''
+ current = None
+ desired = {'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result is None
+
+ def test_get_modified_attributes_for_no_data(self):
+ ''' validate modified attributes when current is None '''
+ current = None
+ desired = {'name': 'test'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_get_modified_attributes(self):
+ ''' validate modified attributes '''
+ current = {'name': ['test', 'abcd', 'xyz', 'pqr'], 'state': 'present'}
+ desired = {'name': ['abcd', 'abc', 'xyz', 'pqr'], 'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == desired
+
+ def test_get_modified_attributes_for_intersecting_mixed_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': [2, 'four', 'six', 8]}
+ desired = {'name': ['a', 8, 'ab', 'four', 'abcd']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abcd']}
+
+ def test_get_modified_attributes_for_intersecting_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['two', 'four', 'six', 'eight']}
+ desired = {'name': ['a', 'six', 'ab', 'four', 'abc']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abc']}
+
+ def test_get_modified_attributes_for_nonintersecting_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['two', 'four', 'six', 'eight']}
+ desired = {'name': ['a', 'ab', 'abd']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abd']}
+
+ def test_get_modified_attributes_for_list_of_dicts_no_data(self):
+ ''' validate modified attributes for list diff '''
+ current = None
+ desired = {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {}
+
+ def test_get_modified_attributes_for_intersecting_list_of_dicts(self):
+ ''' validate modified attributes for list diff '''
+ current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+ desired = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+
+ def test_get_modified_attributes_for_nonintersecting_list_of_dicts(self):
+ ''' validate modified attributes for list diff '''
+ current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+ desired = {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+
+ def test_get_modified_attributes_for_list_diff(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['test', 'abcd'], 'state': 'present'}
+ desired = {'name': ['abcd', 'abc'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['abc']}
+
+ def test_get_modified_attributes_for_no_change(self):
+ ''' validate modified attributes for same data in current and desired '''
+ current = {'name': 'test'}
+ desired = {'name': 'test'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_get_modified_attributes_for_an_empty_desired_list(self):
+ ''' validate modified attributes for an empty desired list '''
+ current = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+ desired = {'snapmirror_label': [], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {'snapmirror_label': []}
+
+ def test_get_modified_attributes_for_an_empty_desired_list_diff(self):
+ ''' validate modified attributes for an empty desired list with diff'''
+ current = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+ desired = {'snapmirror_label': [], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'snapmirror_label': []}
+
+ def test_get_modified_attributes_for_an_empty_current_list(self):
+ ''' validate modified attributes for an empty current list '''
+ current = {'snapmirror_label': [], 'state': 'present'}
+ desired = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {'snapmirror_label': ['daily', 'weekly', 'monthly']}
+
+ def test_get_modified_attributes_for_an_empty_current_list_diff(self):
+ ''' validate modified attributes for an empty current list with diff'''
+ current = {'snapmirror_label': [], 'state': 'present'}
+ desired = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'snapmirror_label': ['daily', 'weekly', 'monthly']}
+
+ def test_get_modified_attributes_for_empty_lists(self):
+ ''' validate modified attributes for empty lists '''
+ current = {'snapmirror_label': [], 'state': 'present'}
+ desired = {'snapmirror_label': [], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_get_modified_attributes_for_empty_lists_diff(self):
+ ''' validate modified attributes for empty lists with diff '''
+ current = {'snapmirror_label': [], 'state': 'present'}
+ desired = {'snapmirror_label': [], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {}
+
+ def test_get_modified_attributes_equal_lists_with_duplicates(self):
+ ''' validate modified attributes for equal lists with duplicates '''
+ current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, False)
+ assert result == {}
+
+ def test_get_modified_attributes_equal_lists_with_duplicates_diff(self):
+ ''' validate modified attributes for equal lists with duplicates with diff '''
+ current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {}
+
+ def test_get_modified_attributes_for_current_list_with_duplicates(self):
+ ''' validate modified attributes for current list with duplicates '''
+ current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ desired = {'schedule': ['daily', 'daily', 'weekly', 'monthly'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, False)
+ assert result == {'schedule': ['daily', 'daily', 'weekly', 'monthly']}
+
+ def test_get_modified_attributes_for_current_list_with_duplicates_diff(self):
+ ''' validate modified attributes for current list with duplicates with diff '''
+ current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ desired = {'schedule': ['daily', 'daily', 'weekly', 'monthly'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'schedule': []}
+
+ def test_get_modified_attributes_for_desired_list_with_duplicates(self):
+ ''' validate modified attributes for desired list with duplicates '''
+ current = {'schedule': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+ desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, False)
+ assert result == {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily']}
+
+ def test_get_modified_attributes_for_desired_list_with_duplicates_diff(self):
+ ''' validate modified attributes for desired list with duplicates with diff '''
+ current = {'schedule': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+ desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'schedule': ['hourly', 'daily', 'daily']}
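+ # Taken together, the duplicate-list expectations above suggest that with the diff
+ # flag the helper computes a multiset difference (desired minus current): each
+ # current entry cancels at most one matching desired entry, and whatever desired
+ # entries remain make up the reported change. This is an inference from the asserts,
+ # not a statement about the helper's implementation.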
+
+ def test_is_rename_action_for_empty_input(self):
+ ''' validate rename action for input None '''
+ source = None
+ target = None
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result == source
+
+ def test_is_rename_action_for_no_source(self):
+ ''' validate rename action when source is None '''
+ source = None
+ target = 'test2'
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is False
+
+ def test_is_rename_action_for_no_target(self):
+ ''' validate rename action when target is None '''
+ source = 'test2'
+ target = None
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is True
+
+ def test_is_rename_action(self):
+ ''' validate rename action '''
+ source = 'test'
+ target = 'test2'
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is False
+
+ def test_required_is_not_set_to_none(self):
+ ''' if a key is present, without a value, Ansible sets it to None '''
+ my_obj = na_helper()
+ my_module = MockModule()
+ print(my_module.argument_spec)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.check_and_set_parameters(my_module)
+ msg = 'required_param requires a value, got: None'
+ assert exc.value.args[0]['msg'] == msg
+
+ # force a value different than None
+ my_module.params['required_param'] = 1
+ my_params = my_obj.check_and_set_parameters(my_module)
+ assert set(my_params.keys()) == set(['required_param', 'feature_flags'])
+
+ def test_sanitize_wwn_no_action(self):
+ ''' no change '''
+ initiator = 'tEsT'
+ expected = initiator
+ my_obj = na_helper()
+ result = my_obj.sanitize_wwn(initiator)
+ assert result == expected
+
+ def test_sanitize_wwn_no_action_valid_iscsi(self):
+ ''' no change '''
+ initiator = 'iqn.1995-08.com.eXaMpLe:StRiNg'
+ expected = initiator
+ my_obj = na_helper()
+ result = my_obj.sanitize_wwn(initiator)
+ assert result == expected
+
+ def test_sanitize_wwn_no_action_valid_wwn(self):
+ ''' no change '''
+ initiator = '01:02:03:04:0A:0b:0C:0d'
+ expected = initiator.lower()
+ my_obj = na_helper()
+ result = my_obj.sanitize_wwn(initiator)
+ assert result == expected
+
+ def test_filter_empty_dict(self):
+ ''' empty dict return empty dict '''
+ my_obj = na_helper()
+ arg = dict()
+ result = my_obj.filter_out_none_entries(arg)
+ assert arg == result
+
+ def test_filter_empty_list(self):
+ ''' empty list return empty list '''
+ my_obj = na_helper()
+ arg = list()
+ result = my_obj.filter_out_none_entries(arg)
+ assert arg == result
+
+ def test_filter_typeerror_on_none(self):
+ ''' None input raises a TypeError '''
+ my_obj = na_helper()
+ arg = None
+ with pytest.raises(TypeError) as exc:
+ my_obj.filter_out_none_entries(arg)
+ msg = "unexpected type <class 'NoneType'>"
+ if sys.version_info < (3, 0):
+ # the assert fails on 2.x
+ return
+ assert exc.value.args[0] == msg
+
+ def test_filter_typeerror_on_str(self):
+ ''' a string input raises a TypeError '''
+ my_obj = na_helper()
+ arg = ""
+ with pytest.raises(TypeError) as exc:
+ my_obj.filter_out_none_entries(arg)
+ msg = "unexpected type <class 'str'>"
+ if sys.version_info < (3, 0):
+ # the assert fails on 2.x
+ return
+ assert exc.value.args[0] == msg
+
+ def test_filter_simple_dict(self):
+ ''' simple dict return simple dict '''
+ my_obj = na_helper()
+ arg = dict(a=None, b=1, c=None, d=2, e=3)
+ expected = dict(b=1, d=2, e=3)
+ result = my_obj.filter_out_none_entries(arg)
+ assert expected == result
+
+ def test_filter_simple_list(self):
+ ''' simple list return simple list '''
+ my_obj = na_helper()
+ arg = [None, 2, 3, None, 5]
+ expected = [2, 3, 5]
+ result = my_obj.filter_out_none_entries(arg)
+ assert expected == result
+
+ def test_filter_dict_dict(self):
+ ''' nested dict: None entries are filtered out at every level '''
+ my_obj = na_helper()
+ arg = dict(a=None, b=dict(u=1, v=None, w=2), c=dict(), d=2, e=3)
+ expected = dict(b=dict(u=1, w=2), d=2, e=3)
+ result = my_obj.filter_out_none_entries(arg)
+ assert expected == result
+
+ def test_filter_list_list(self):
+ ''' nested list: None entries are filtered out at every level '''
+ my_obj = na_helper()
+ arg = [None, [1, None, 3], 3, None, 5]
+ expected = [[1, 3], 3, 5]
+ result = my_obj.filter_out_none_entries(arg)
+ assert expected == result
+
+ def test_filter_dict_list_dict(self):
+ ''' dict containing a list of dicts: None entries are filtered out at every level '''
+ my_obj = na_helper()
+ arg = dict(a=None, b=[dict(u=1, v=None, w=2), 5, None, dict(x=6, y=None)], c=dict(), d=2, e=3)
+ expected = dict(b=[dict(u=1, w=2), 5, dict(x=6)], d=2, e=3)
+ result = my_obj.filter_out_none_entries(arg)
+ assert expected == result
+
+ def test_filter_list_dict_list(self):
+ ''' list containing dicts and lists: None entries are filtered out at every level '''
+ my_obj = na_helper()
+ arg = [None, [1, None, 3], dict(a=None, b=[7, None, 9], c=None, d=dict(u=None, v=10)), None, 5]
+ expected = [[1, 3], dict(b=[7, 9], d=dict(v=10)), 5]
+ result = my_obj.filter_out_none_entries(arg)
+ assert expected == result
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py
new file mode 100644
index 00000000..58d4eac1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py
@@ -0,0 +1,419 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for Ansible module: na_ontap_aggregate """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_aggregate \
+ import NetAppOntapAggregate as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
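+# set_module_args mimics what Ansible does when it launches a module: the parameters
+# are wrapped under ANSIBLE_MODULE_ARGS, JSON-encoded, and stored in basic._ANSIBLE_ARGS
+# so that AnsibleModule() reads them from there instead of expecting input on stdin.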
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+AGGR_NAME = 'aggr_name'
+OS_NAME = 'abc'
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, parm2=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.parm2 = parm2
+ self.xml_in = None
+ self.xml_out = None
+ self.zapis = list()
+
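+ # invoke_successfully fakes ZAPI responses based on self.type: 'no_aggregate' reports
+ # no match, 'no_aggregate_then_aggregate' reports no match on the first call and an
+ # existing aggregate afterwards (used by the rename tests), and 'aggregate_fail'
+ # raises NaApiError to exercise the error paths.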
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ print('request:', xml.to_string())
+ zapi = xml.get_name()
+ self.zapis.append(zapi)
+ if zapi == 'aggr-object-store-get-iter':
+ if self.type in ('aggregate_no_object_store',):
+ xml = None
+ else:
+ xml = self.build_object_store_info()
+ elif self.type in ('aggregate', 'aggr_disks', 'aggr_mirrors', 'aggregate_no_object_store'):
+ with_os = self.type != 'aggregate_no_object_store'
+ xml = self.build_aggregate_info(self.parm1, self.parm2, with_object_store=with_os)
+ if self.type in ('aggr_disks', 'aggr_mirrors'):
+ self.type = 'disks'
+ elif self.type == 'no_aggregate':
+ xml = None
+ elif self.type == 'no_aggregate_then_aggregate':
+ xml = None
+ self.type = 'aggregate'
+ elif self.type == 'disks':
+ xml = self.build_disk_info()
+ elif self.type == 'aggregate_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_aggregate_info(vserver, aggregate, with_object_store):
+ ''' build xml data for aggregate and vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 3,
+ 'attributes-list':
+ {'aggr-attributes':
+ {'aggregate-name': aggregate,
+ 'aggr-raid-attributes': {'state': 'offline'}
+ },
+ 'object-store-information': {'object-store-name': 'abc'}
+ },
+ 'vserver-info':
+ {'vserver-name': vserver
+ }
+ }
+ if not with_object_store:
+ del data['attributes-list']['object-store-information']
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_object_store_info():
+ ''' build xml data for object_store '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 3,
+ 'attributes-list':
+ {'object-store-information': {'object-store-name': 'abc'}
+ }
+ }
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
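+ # The mocked disk layout below places disks 1 and 2 in plex0 and disks 3 and 4 in
+ # plexM, which the aggr_disks/aggr_mirrors scenarios use to model a mirrored
+ # aggregate (plain disks on one plex, mirror disks on the other).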
+ @staticmethod
+ def build_disk_info():
+ ''' build xml data for disk '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': [
+ {'disk-info':
+ {'disk-name': '1',
+ 'disk-raid-info':
+ {'disk-aggregate-info':
+ {'plex-name': 'plex0'}
+ }}},
+ {'disk-info':
+ {'disk-name': '2',
+ 'disk-raid-info':
+ {'disk-aggregate-info':
+ {'plex-name': 'plex0'}
+ }}},
+ {'disk-info':
+ {'disk-name': '3',
+ 'disk-raid-info':
+ {'disk-aggregate-info':
+ {'plex-name': 'plexM'}
+ }}},
+ {'disk-info':
+ {'disk-name': '4',
+ 'disk-raid-info':
+ {'disk-aggregate-info':
+ {'plex-name': 'plexM'}
+ }}},
+ ]}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection('aggregate', '12', 'name')
+ # whether to use a mock or a simulator
+ self.onbox = False
+ self.zapis = list()
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.74.78'
+ username = 'admin'
+ password = 'netapp1!'
+ name = 'name'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ name = AGGR_NAME
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'name': name
+ })
+
+ def call_command(self, module_args, what=None):
+ ''' utility function to call apply '''
+ args = dict(self.set_default_args())
+ args.update(module_args)
+ set_module_args(args)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ aggregate = 'aggregate'
+ if what == 'disks':
+ aggregate = 'aggr_disks'
+ elif what == 'mirrors':
+ aggregate = 'aggr_mirrors'
+ elif what is not None:
+ aggregate = what
+
+ if not self.onbox:
+ # mock the connection
+ my_obj.server = MockONTAPConnection(aggregate, '12', AGGR_NAME)
+ self.zapis = my_obj.server.zapis
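+ # keep a handle on the list of ZAPI call names recorded by the mock, so tests can
+ # assert which ZAPIs were issued (e.g. 'aggr-object-store-attach', 'aggr-rename')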
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ return exc.value.args[0]['changed']
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_create(self):
+ module_args = {
+ 'disk_count': '2',
+ 'is_mirrored': 'true',
+ }
+ changed = self.call_command(module_args, what='no_aggregate')
+ assert changed
+ assert 'aggr-object-store-attach' not in self.zapis
+
+ def test_create_with_object_store(self):
+ module_args = {
+ 'disk_count': '2',
+ 'is_mirrored': 'true',
+ 'object_store_name': 'abc'
+ }
+ changed = self.call_command(module_args, what='no_aggregate')
+ assert changed
+ assert 'aggr-object-store-attach' in self.zapis
+
+ def test_is_mirrored(self):
+ module_args = {
+ 'disk_count': '2',
+ 'is_mirrored': 'true',
+ }
+ changed = self.call_command(module_args)
+ assert not changed
+
+ def test_disks_list(self):
+ module_args = {
+ 'disks': ['1', '2'],
+ }
+ changed = self.call_command(module_args, 'disks')
+ assert not changed
+
+ def test_mirror_disks(self):
+ module_args = {
+ 'disks': ['1', '2'],
+ 'mirror_disks': ['3', '4']
+ }
+ changed = self.call_command(module_args, 'mirrors')
+ assert not changed
+
+ def test_spare_pool(self):
+ module_args = {
+ 'disk_count': '2',
+ 'spare_pool': 'Pool1'
+ }
+ changed = self.call_command(module_args)
+ assert not changed
+
+ def test_rename(self):
+ module_args = {
+ 'from_name': 'test_name2'
+ }
+ changed = self.call_command(module_args, 'no_aggregate_then_aggregate')
+ assert changed
+ assert 'aggr-rename' in self.zapis
+
+ def test_rename_error_no_from(self):
+ module_args = {
+ 'from_name': 'test_name2'
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args, 'no_aggregate')
+ msg = 'Error renaming: aggregate %s does not exist' % module_args['from_name']
+ assert msg in exc.value.args[0]['msg']
+
+ def test_rename_with_add_object_store(self):
+ module_args = {
+ 'from_name': 'test_name2'
+ }
+ changed = self.call_command(module_args, 'aggregate_no_object_store')
+ assert not changed
+
+ def test_object_store_present(self):
+ module_args = {
+ 'object_store_name': 'abc'
+ }
+ changed = self.call_command(module_args)
+ assert not changed
+
+ def test_object_store_create(self):
+ module_args = {
+ 'object_store_name': 'abc'
+ }
+ changed = self.call_command(module_args, 'aggregate_no_object_store')
+ assert changed
+
+ def test_object_store_modify(self):
+ ''' not supported '''
+ module_args = {
+ 'object_store_name': 'def'
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args)
+ msg = 'Error: object store %s is already associated with aggregate %s.' % (OS_NAME, AGGR_NAME)
+ assert msg in exc.value.args[0]['msg']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'service_state': 'online'})
+ module_args.update({'unmount_volumes': 'True'})
+ module_args.update({'from_name': 'test_name2'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('aggregate_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.aggr_get_iter(module_args.get('name'))
+ assert '' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.aggregate_online()
+ assert 'Error changing the state of aggregate' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.aggregate_offline()
+ assert 'Error changing the state of aggregate' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_aggr()
+ assert 'Error provisioning aggregate' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_aggr()
+ assert 'Error removing aggregate' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.rename_aggregate()
+ assert 'Error renaming aggregate' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.apply()
+ assert 'TEST:This exception is from the unit test' in exc.value.args[0]['msg']
+
+ def test_disks_bad_mapping(self):
+ module_args = {
+ 'disks': ['0'],
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args, 'mirrors')
+ msg = "Error mapping disks for aggregate %s: cannot not match disks with current aggregate disks." % AGGR_NAME
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ def test_disks_overlapping_mirror(self):
+ module_args = {
+ 'disks': ['1', '2', '3'],
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args, 'mirrors')
+ msg = "Error mapping disks for aggregate %s: found overlapping plexes:" % AGGR_NAME
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ def test_disks_removing_disk(self):
+ module_args = {
+ 'disks': ['1'],
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args, 'mirrors')
+ msg = "Error removing disks is not supported. Aggregate %s: these disks cannot be removed: ['2']." % AGGR_NAME
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ def test_disks_removing_mirror_disk(self):
+ module_args = {
+ 'disks': ['1', '2'],
+ 'mirror_disks': ['4', '6']
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args, 'mirrors')
+ msg = "Error removing disks is not supported. Aggregate %s: these disks cannot be removed: ['3']." % AGGR_NAME
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ def test_disks_add(self):
+ module_args = {
+ 'disks': ['1', '2', '5'],
+ }
+ changed = self.call_command(module_args, 'disks')
+ assert changed
+
+ def test_mirror_disks_add(self):
+ module_args = {
+ 'disks': ['1', '2', '5'],
+ 'mirror_disks': ['3', '4', '6']
+ }
+ changed = self.call_command(module_args, 'mirrors')
+ assert changed
+
+ def test_mirror_disks_add_unbalanced(self):
+ module_args = {
+ 'disks': ['1', '2'],
+ 'mirror_disks': ['3', '4', '6']
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.call_command(module_args, 'mirrors')
+ msg = "Error cannot add mirror disks ['6'] without adding disks for aggregate %s." % AGGR_NAME
+ assert exc.value.args[0]['msg'].startswith(msg)
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py
new file mode 100644
index 00000000..c5c591f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py
@@ -0,0 +1,245 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport \
+ import NetAppONTAPasup as asup_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'asup':
+ xml = self.build_asup_config_info(self.params)
+ self.xml_out = xml
+ return xml
+
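+ # build_asup_config_info turns the flat mock_asup dict into the nested
+ # autosupport-config-info XML that get_autosupport_config() is expected to parse;
+ # mail-hosts and noteto appear to be modelled as repeated child elements to cover
+ # the multi-valued fields checked in test_result_from_get.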
+ @staticmethod
+ def build_asup_config_info(asup_data):
+ ''' build xml data for asup-config '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'attributes': {'autosupport-config-info': {
+ 'node-name': asup_data['node_name'],
+ 'is-enabled': asup_data['is_enabled'],
+ 'is-support-enabled': asup_data['support'],
+ 'proxy-url': asup_data['proxy_url'],
+ 'post-url': asup_data['post_url'],
+ 'transport': asup_data['transport'],
+ 'is-node-in-subject': 'false',
+ 'from': 'test',
+ 'mail-hosts': [{'string': '1.2.3.4'}, {'string': '4.5.6.8'}],
+ 'noteto': [{'mail-address': 'abc@test.com'},
+ {'mail-address': 'def@test.com'}],
+ }}}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_asup = {
+ 'node_name': 'test-vsim1',
+ 'transport': 'https',
+ 'support': 'false',
+ 'post_url': 'testbed.netapp.com/asupprod/post/1.0/postAsup',
+ 'proxy_url': 'something.com',
+ }
+
+ def mock_args(self):
+ return {
+ 'node_name': self.mock_asup['node_name'],
+ 'transport': self.mock_asup['transport'],
+ 'support': self.mock_asup['support'],
+ 'post_url': self.mock_asup['post_url'],
+ 'proxy_url': self.mock_asup['proxy_url'],
+ 'hostname': 'host',
+ 'username': 'admin',
+ 'password': 'password',
+ }
+
+ def get_asup_mock_object(self, kind=None, enabled='false'):
+ """
+ Helper method to return an na_ontap_autosupport object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_autosupport object
+ """
+ asup_obj = asup_module()
+ asup_obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ asup_obj.server = MockONTAPConnection()
+ else:
+ data = self.mock_asup
+ data['is_enabled'] = enabled
+ asup_obj.server = MockONTAPConnection(kind='asup', data=data)
+ return asup_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ asup_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_enable_asup(self):
+ ''' enabling autosupport reports a change '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_asup_mock_object('asup').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_disable_asup(self):
+ ''' disabling autosupport reports a change '''
+ # the mocked current config reports asup as enabled; state=absent should disable it
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_asup_mock_object(kind='asup', enabled='true').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_result_from_get(self):
+ ''' Check boolean and service_state conversion from get '''
+ data = self.mock_args()
+ set_module_args(data)
+ obj = self.get_asup_mock_object(kind='asup', enabled='true')
+ # constructed based on the values passed in self.mock_asup and build_asup_config_info()
+ expected_dict = {
+ 'node_name': 'test-vsim1',
+ 'service_state': 'started',
+ 'support': False,
+ 'hostname_in_subject': False,
+ 'transport': self.mock_asup['transport'],
+ 'post_url': self.mock_asup['post_url'],
+ 'proxy_url': self.mock_asup['proxy_url'],
+ 'from_address': 'test',
+ 'mail_hosts': ['1.2.3.4', '4.5.6.8'],
+ 'partner_addresses': [],
+ 'to_addresses': [],
+ 'noteto': ['abc@test.com', 'def@test.com']
+ }
+ result = obj.get_autosupport_config()
+ assert result == expected_dict
+
+ def test_modify_config(self):
+ ''' modified transport, post_url and proxy_url values are detected as a change '''
+ data = self.mock_args()
+ data['transport'] = 'http'
+ data['post_url'] = 'somethingelse.com'
+ data['proxy_url'] = 'somethingelse.com'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_asup_mock_object('asup').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport.NetAppONTAPasup.get_autosupport_config')
+ def test_get_called(self, get_asup):
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_asup_mock_object('asup').apply()
+ get_asup.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport.NetAppONTAPasup.modify_autosupport_config')
+ def test_modify_called(self, modify_asup):
+ data = self.mock_args()
+ data['transport'] = 'http'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_asup_mock_object('asup').apply()
+ modify_asup.assert_called_with({'transport': 'http', 'service_state': 'started'})
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport.NetAppONTAPasup.modify_autosupport_config')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport.NetAppONTAPasup.get_autosupport_config')
+ def test_modify_not_called(self, get_asup, modify_asup):
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_asup_mock_object('asup').apply()
+ get_asup.assert_called_with()
+ modify_asup.assert_not_called()
+
+ def test_modify_packet(self):
+ '''check XML construction for nested attributes like mail-hosts, noteto, partner-address, and to'''
+ data = self.mock_args()
+ set_module_args(data)
+ obj = self.get_asup_mock_object(kind='asup', enabled='true')
+ modify_dict = {
+ 'noteto': ['one@test.com'],
+ 'partner_addresses': ['firstpartner@test.com'],
+ 'mail_hosts': ['1.1.1.1'],
+ 'to_addresses': ['first@test.com']
+ }
+ obj.modify_autosupport_config(modify_dict)
+ xml = obj.server.xml_in
+ for key in ['noteto', 'to', 'partner-address']:
+ assert xml[key] is not None
+ assert xml[key]['mail-address'] is not None
+ assert xml['noteto']['mail-address'] == modify_dict['noteto'][0]
+ assert xml['to']['mail-address'] == modify_dict['to_addresses'][0]
+ assert xml['partner-address']['mail-address'] == modify_dict['partner_addresses'][0]
+ assert xml['mail-hosts'] is not None
+ assert xml['mail-hosts']['string'] is not None
+ assert xml['mail-hosts']['string'] == modify_dict['mail_hosts'][0]
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py
new file mode 100644
index 00000000..b250bdef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py
@@ -0,0 +1,135 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_autosupport_invoke '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport_invoke \
+ import NetAppONTAPasupInvoke as invoke_module # module under test
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error")
+}
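+# Each SRR entry is a (status_code, body, error) triple returned by the patched
+# send_request(): 'is_rest' simulates a successful REST probe, 'is_zapi' forces the
+# module to fall back to ZAPI, and 'end_of_sequence' guards against unexpected extra
+# calls. (Shape inferred from how the tests feed these into mock_request.side_effect.)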
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def invoke_successfully(self, xml, enable_tunneling):
+ raise netapp_utils.zapi.NaApiError('test', 'Expected error')
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_autosupport_invoke '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_invoke = {
+ 'name': 'test_node',
+ 'message': 'test_message',
+ 'type': 'all'
+ }
+
+ def mock_args(self):
+ return {
+ 'message': self.mock_invoke['message'],
+ 'name': self.mock_invoke['name'],
+ 'type': self.mock_invoke['type'],
+ 'hostname': 'test_host',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_invoke_mock_object(self, use_rest=True):
+ invoke_obj = invoke_module()
+ if not use_rest:
+ invoke_obj.ems_log_event = Mock()
+ invoke_obj.server = MockONTAPConnection()
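+ # with use_rest disabled the ZAPI path is exercised: the bare MockONTAPConnection
+ # raises NaApiError from invoke_successfully, driving the error branch under test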
+ return invoke_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_send(self, mock_request):
+ '''Test successful send message'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_invoke_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_send_error(self, mock_request):
+ '''Test rest send error'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_invoke_mock_object().apply()
+ msg = "Error on sending autosupport message to node %s: Expected error." % data['name']
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_zapi_send_error(self):
+ '''Test ZAPI send error'''
+ data = self.mock_args()
+ data['use_rest'] = 'Never'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_invoke_mock_object(use_rest=False).apply()
+ msg = "Error on sending autosupport message to node %s: NetApp API failed. Reason - test:Expected error." % data['name']
+ assert exc.value.args[0]['msg'] == msg
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py
new file mode 100644
index 00000000..86a0b8d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py
@@ -0,0 +1,309 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain \
+ import NetAppOntapBroadcastDomain as broadcast_domain_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'broadcast_domain':
+ xml = self.build_broadcast_domain_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
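+ # the mocked domain always reports a single member port, 'test_port_1'; the
+ # add/delete port tests below diff their desired 'ports' value against it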
+ def build_broadcast_domain_info(broadcast_domain_details):
+ ''' build xml data for broadcast_domain info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'net-port-broadcast-domain-info': {
+ 'broadcast-domain': broadcast_domain_details['name'],
+ 'ipspace': broadcast_domain_details['ipspace'],
+ 'mtu': broadcast_domain_details['mtu'],
+ 'ports': {
+ 'port-info': {
+ 'port': 'test_port_1'
+ }
+ }
+ }
+
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_broadcast_domain = {
+ 'name': 'test_broadcast_domain',
+ 'mtu': '1000',
+ 'ipspace': 'Default',
+ 'ports': 'test_port_1'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_broadcast_domain['name'],
+ 'ipspace': self.mock_broadcast_domain['ipspace'],
+ 'mtu': self.mock_broadcast_domain['mtu'],
+ 'ports': self.mock_broadcast_domain['ports'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_broadcast_domain_mock_object(self, kind=None, data=None):
+ """
+ Helper method to return an na_ontap_broadcast_domain object
+ :param kind: passes this param to MockONTAPConnection()
+ :param data: passes this param to MockONTAPConnection()
+ :return: na_ontap_broadcast_domain object
+ """
+ broadcast_domain_obj = broadcast_domain_module()
+ broadcast_domain_obj.asup_log_for_cserver = Mock(return_value=None)
+ broadcast_domain_obj.cluster = Mock()
+ broadcast_domain_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ broadcast_domain_obj.server = MockONTAPConnection()
+ else:
+ if data is None:
+ broadcast_domain_obj.server = MockONTAPConnection(kind='broadcast_domain', data=self.mock_broadcast_domain)
+ else:
+ broadcast_domain_obj.server = MockONTAPConnection(kind='broadcast_domain', data=data)
+ return broadcast_domain_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ broadcast_domain_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_broadcast_domain(self):
+ ''' Test if get_broadcast_domain returns None for non-existent broadcast_domain '''
+ set_module_args(self.mock_args())
+ result = self.get_broadcast_domain_mock_object().get_broadcast_domain()
+ assert result is None
+
+ def test_create_error_missing_broadcast_domain(self):
+ ''' Test if create throws an error if broadcast_domain is not specified'''
+ data = self.mock_args()
+ del data['name']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_broadcast_domain_mock_object('broadcast_domain').create_broadcast_domain()
+ msg = 'missing required arguments: name'
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.create_broadcast_domain')
+ def test_successful_create(self, create_broadcast_domain):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ create_broadcast_domain.assert_called_with()
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ obj = self.get_broadcast_domain_mock_object('broadcast_domain')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_modify_mtu(self):
+ ''' Test successful modify mtu '''
+ data = self.mock_args()
+ data['mtu'] = '1200'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_ipspace_idempotency(self):
+ ''' Test that modifying ipspace after creation fails '''
+ data = self.mock_args()
+ data['ipspace'] = 'Cluster'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+ msg = 'A domain ipspace can not be modified after the domain has been created.'
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.add_broadcast_domain_ports')
+ def test_add_ports(self, add_broadcast_domain_ports):
+ ''' Test successfully adding ports '''
+ data = self.mock_args()
+ data['ports'] = 'test_port_1,test_port_2'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+ assert exc.value.args[0]['changed']
+ add_broadcast_domain_ports.assert_called_with(['test_port_2'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain_ports')
+ def test_delete_ports(self, delete_broadcast_domain_ports):
+ ''' Test successfully deleting ports '''
+ data = self.mock_args()
+ data['ports'] = ''
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+ assert exc.value.args[0]['changed']
+ delete_broadcast_domain_ports.assert_called_with(['test_port_1'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.modify_broadcast_domain')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.split_broadcast_domain')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
+ def test_split_broadcast_domain(self, get_broadcast_domain, split_broadcast_domain, modify_broadcast_domain):
+ ''' Test successful split broadcast domain '''
+ data = self.mock_args()
+ data['from_name'] = 'test_broadcast_domain'
+ data['name'] = 'test_broadcast_domain_2'
+ data['ports'] = 'test_port_2'
+ set_module_args(data)
+ current = {
+ 'name': 'test_broadcast_domain',
+ 'mtu': '1000',
+ 'ipspace': 'Default',
+ 'ports': ['test_port_1,test_port2']
+ }
+ get_broadcast_domain.side_effect = [
+ None,
+ current,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ modify_broadcast_domain.assert_not_called()
+ split_broadcast_domain.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.modify_broadcast_domain')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
+ def test_split_broadcast_domain_modify_delete(self, get_broadcast_domain, modify_broadcast_domain, delete_broadcast_domain):
+        ''' Test successful split broadcast domain with delete and modify '''
+ data = self.mock_args()
+ data['from_name'] = 'test_broadcast_domain'
+ data['name'] = 'test_broadcast_domain_2'
+ data['ports'] = ['test_port_1', 'test_port_2']
+ data['mtu'] = '1200'
+ set_module_args(data)
+
+ current = {
+ 'name': 'test_broadcast_domain',
+ 'mtu': '1000',
+ 'ipspace': 'Default',
+ 'ports': ['test_port_1', 'test_port2']
+ }
+ get_broadcast_domain.side_effect = [
+ None,
+ current,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ delete_broadcast_domain.assert_called_with('test_broadcast_domain')
+ modify_broadcast_domain.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
+ def test_split_broadcast_domain_not_exist(self, get_broadcast_domain):
+        ''' Test split broadcast domain when the source domain does not exist '''
+ data = self.mock_args()
+ data['from_name'] = 'test_broadcast_domain'
+ data['name'] = 'test_broadcast_domain_2'
+ data['ports'] = 'test_port_2'
+ set_module_args(data)
+
+ get_broadcast_domain.side_effect = [
+ None,
+ None,
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_broadcast_domain_mock_object().apply()
+ msg = 'A domain can not be split if it does not exist.'
+        assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.split_broadcast_domain')
+ def test_split_broadcast_domain_idempotency(self, split_broadcast_domain):
+        ''' Test split broadcast domain idempotency '''
+ data = self.mock_args()
+ data['from_name'] = 'test_broadcast_domain'
+ data['ports'] = 'test_port_1'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+ assert exc.value.args[0]['changed'] is False
+ split_broadcast_domain.assert_not_called()
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py
new file mode 100644
index 00000000..7bc8dfbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py
@@ -0,0 +1,116 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_cg_snapshot'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cg_snapshot \
+ import NetAppONTAPCGSnapshot as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'vserver':
+ xml = self.build_vserver_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_info(vserver):
+        ''' build xml data for vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes-list')
+ attributes.add_node_with_children('vserver-info',
+ **{'vserver-name': vserver})
+ xml.add_child_elem(attributes)
+ # print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_command_called(self):
+ ''' a more interesting test '''
+ set_module_args({
+ 'vserver': 'vserver',
+ 'volumes': 'volumes',
+ 'snapshot': 'snapshot',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ my_obj = my_module()
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.cgcreate()
+ msg = 'Error fetching CG ID for CG commit snapshot'
+ assert exc.value.args[0]['msg'] == msg
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py
new file mode 100644
index 00000000..d35f816c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py
@@ -0,0 +1,228 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_cifs '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs \
+ import NetAppONTAPCifsShare as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'cifs':
+ xml = self.build_cifs_info()
+ elif self.type == 'cifs_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_cifs_info():
+ ''' build xml data for cifs-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1, 'attributes-list': {'cifs-share': {
+ 'share-name': 'test',
+ 'path': '/test',
+ 'vscan-fileop-profile': 'standard',
+ 'share-properties': [{'cifs-share-properties': 'browsable'},
+ {'cifs-share-properties': 'oplocks'}],
+ 'symlink-properties': [{'cifs-share-symlink-properties': 'enable'},
+ {'cifs-share-symlink-properties': 'read_only'}],
+ }}}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.77.37'
+ username = 'admin'
+ password = 'netapp1!'
+ share_name = 'test'
+ path = '/test'
+ share_properties = 'browsable,oplocks'
+ symlink_properties = 'disable'
+ vscan_fileop_profile = 'standard'
+ vserver = 'abc'
+ else:
+ hostname = '10.193.77.37'
+ username = 'admin'
+ password = 'netapp1!'
+ share_name = 'test'
+ path = '/test'
+ share_properties = 'show_previous_versions'
+ symlink_properties = 'disable'
+ vscan_fileop_profile = 'no_scan'
+ vserver = 'abc'
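+            # these mock-mode values differ from build_cifs_info(), so apply() against the 'cifs' mock reports a change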
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'share_name': share_name,
+ 'path': path,
+ 'share_properties': share_properties,
+ 'symlink_properties': symlink_properties,
+ 'vscan_fileop_profile': vscan_fileop_profile,
+ 'vserver': vserver
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_cifs_get_called(self):
+ ''' fetching details of cifs '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ cifs_get = my_obj.get_cifs_share()
+ print('Info: test_cifs_share_get: %s' % repr(cifs_get))
+ assert not bool(cifs_get)
+
+ def test_ensure_apply_for_cifs_called(self):
+ ''' creating cifs share and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('cifs')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs.NetAppONTAPCifsShare.create_cifs_share')
+ def test_cifs_create_called(self, create_cifs_share):
+ ''' creating cifs'''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_apply: %s' % repr(exc.value))
+ create_cifs_share.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs.NetAppONTAPCifsShare.delete_cifs_share')
+ def test_cifs_delete_called(self, delete_cifs_share):
+ ''' deleting cifs'''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args['state'] = 'absent'
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('cifs')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_apply: %s' % repr(exc.value))
+ delete_cifs_share.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs.NetAppONTAPCifsShare.modify_cifs_share')
+ def test_cifs_modify_called(self, modify_cifs_share):
+ ''' modifying cifs'''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('cifs')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_apply: %s' % repr(exc.value))
+ modify_cifs_share.assert_called_with()
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('cifs_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_cifs_share()
+ assert 'Error creating cifs-share' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_cifs_share()
+ assert 'Error deleting cifs-share' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_cifs_share()
+ assert 'Error modifying cifs-share' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py
new file mode 100644
index 00000000..27b368ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py
@@ -0,0 +1,222 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_cifs_server '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_server \
+ import NetAppOntapcifsServer as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, parm2=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.parm2 = parm2
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'cifs_server':
+ xml = self.build_vserver_info(self.parm1, self.parm2)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_info(cifs_server, admin_status):
+ ''' build xml data for cifs-server-info '''
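+        # the tests below pass 'up' or 'down' here and pair it with service_state 'started'/'stopped' to check idempotency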
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'cifs-server-config': {'cifs-server': cifs_server,
+ 'administrative-status': admin_status}}}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.use_vsim = False
+
+ def set_default_args(self):
+ if self.use_vsim:
+ hostname = '10.193.77.154'
+ username = 'admin'
+ password = 'netapp1!'
+ cifs_server = 'test'
+ vserver = 'ansible_test'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ cifs_server = 'name'
+ vserver = 'vserver'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'cifs_server_name': cifs_server,
+ 'vserver': vserver
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_cifs_server_get_called(self):
+ ''' a more interesting test '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ cifs_server = my_obj.get_cifs_server()
+ print('Info: test_cifs_server_get: %s' % repr(cifs_server))
+ assert cifs_server is None
+
+ def test_ensure_cifs_server_apply_for_create_called(self):
+ ''' creating cifs server and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'cifs_server_name': 'create'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_server_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'create', 'up')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_server_apply_for_create: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ def test_ensure_cifs_server_apply_for_delete_called(self):
+ ''' deleting cifs server and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'cifs_server_name': 'delete'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'delete', 'up')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_server_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'state': 'absent'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'delete', 'up')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cifs_server_delete: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_start_cifs_server_called(self):
+ ''' starting cifs server and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'cifs_server_name': 'delete'})
+ module_args.update({'service_state': 'started'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'test', 'up')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_start_cifs_server: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'service_state': 'stopped'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'test', 'up')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_start_cifs_server: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_stop_cifs_server_called(self):
+ ''' stopping cifs server and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'cifs_server_name': 'delete'})
+ module_args.update({'service_state': 'stopped'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'test', 'down')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_stop_cifs_server: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'service_state': 'started'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cifs_server', 'test', 'down')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_stop_cifs_server: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py
new file mode 100644
index 00000000..8d90c477
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py
@@ -0,0 +1,429 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_cluster '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster \
+ import NetAppONTAPCluster as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'cluster':
+ xml = self.build_cluster_info()
+        elif self.type == 'cluster_success':
+ xml = self.build_cluster_info_success()
+ elif self.type == 'cluster_add':
+ xml = self.build_add_node_info()
+ elif self.type == 'cluster_extra_input':
+ self.type = 'cluster' # success on second call
+ raise netapp_utils.zapi.NaApiError(code='TEST1', message="Extra input: single-node-cluster")
+ elif self.type == 'cluster_extra_input_loop':
+ raise netapp_utils.zapi.NaApiError(code='TEST2', message="Extra input: single-node-cluster")
+ elif self.type == 'cluster_extra_input_other':
+ raise netapp_utils.zapi.NaApiError(code='TEST3', message="Extra input: other-unexpected-element")
+ elif self.type == 'cluster_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST4', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ def autosupport_log(self):
+ ''' mock autosupport log'''
+ return None
+
+ @staticmethod
+ def build_cluster_info():
+ ''' build xml data for cluster-create-join-progress-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes': {
+ 'cluster-create-join-progress-info': {
+ 'is-complete': 'true',
+ 'status': 'whatever'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_cluster_info_success():
+ ''' build xml data for cluster-create-join-progress-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes': {
+ 'cluster-create-join-progress-info': {
+ 'is-complete': 'false',
+ 'status': 'success'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_add_node_info():
+ ''' build xml data for cluster-create-add-node-status-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes-list': {
+ 'cluster-create-add-node-status-info': {
+ 'failure-msg': '',
+ 'status': 'success'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.use_vsim = False
+
+ def set_default_args(self):
+ if self.use_vsim:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ cluster_name = 'abc'
+ else:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ cluster_name = 'abc'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'cluster_name': cluster_name
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ def test_ensure_apply_for_cluster_called(self, get_cl_id):
+ ''' creating cluster and checking idempotency '''
+ get_cl_id.return_value = None
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.create_cluster')
+ def test_cluster_create_called(self, cluster_create, get_cl_id):
+ ''' creating cluster'''
+ get_cl_id.return_value = None
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_success')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ cluster_create.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ def test_cluster_create_old_api(self, get_cl_id):
+ ''' creating cluster'''
+ get_cl_id.return_value = None
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_extra_input')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ def test_cluster_create_old_api_loop(self, get_cl_id):
+ ''' creating cluster'''
+ get_cl_id.return_value = None
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_extra_input_loop')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = 'TEST2:Extra input: single-node-cluster'
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ def test_cluster_create_old_api_other_extra(self, get_cl_id):
+ ''' creating cluster'''
+ get_cl_id.return_value = None
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_extra_input_other')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = 'TEST3:Extra input: other-unexpected-element'
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.add_node')
+ def test_add_node_called(self, add_node, get_cl_id, get_cl_ips):
+        ''' adding a node '''
+ get_cl_ips.return_value = list()
+ get_cl_id.return_value = None
+ data = self.set_default_args()
+ del data['cluster_name']
+ data['cluster_ip_address'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_add')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ add_node.assert_called_with()
+ assert exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_cluster()
+ assert 'Error creating cluster' in exc.value.args[0]['msg']
+ data = self.set_default_args()
+ data['cluster_ip_address'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.add_node()
+ assert 'Error adding node with ip' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.add_node')
+ def test_add_node_idempotent(self, add_node, get_cl_id, get_cl_ips):
+        ''' adding a node (idempotency) '''
+ get_cl_ips.return_value = ['10.10.10.10']
+ get_cl_id.return_value = None
+ data = self.set_default_args()
+ del data['cluster_name']
+ data['cluster_ip_address'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_add')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ try:
+ add_node.assert_not_called()
+ except AttributeError:
+ # not supported with python <= 3.4
+ pass
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.node_remove_wait')
+ def test_remove_node_ip(self, wait, remove_node, get_cl_id, get_cl_ips):
+        ''' removing a node by cluster_ip_address '''
+ get_cl_ips.return_value = ['10.10.10.10']
+ get_cl_id.return_value = None
+ wait.return_value = None
+ data = self.set_default_args()
+ # del data['cluster_name']
+ data['cluster_ip_address'] = '10.10.10.10'
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_add')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ remove_node.assert_called_with()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node')
+ def test_remove_node_ip_idempotent(self, remove_node, get_cl_id, get_cl_ips):
+        ''' removing a node by cluster_ip_address (idempotency) '''
+ get_cl_ips.return_value = list()
+ get_cl_id.return_value = None
+ data = self.set_default_args()
+ # del data['cluster_name']
+ data['cluster_ip_address'] = '10.10.10.10'
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_add')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ try:
+ remove_node.assert_not_called()
+ except AttributeError:
+ # not supported with python <= 3.4
+ pass
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_nodes')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.node_remove_wait')
+ def test_remove_node_name(self, wait, remove_node, get_cl_id, get_cl_nodes):
+        ''' removing a node by node_name '''
+ get_cl_nodes.return_value = ['node1', 'node2']
+ get_cl_id.return_value = None
+ wait.return_value = None
+ data = self.set_default_args()
+ # del data['cluster_name']
+ data['node_name'] = 'node2'
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_add')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ remove_node.assert_called_with()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_nodes')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node')
+ def test_remove_node_name_idempotent(self, remove_node, get_cl_id, get_cl_nodes):
+        ''' removing a node by node_name (idempotency) '''
+ get_cl_nodes.return_value = ['node1', 'node2']
+ get_cl_id.return_value = None
+ data = self.set_default_args()
+ # del data['cluster_name']
+ data['node_name'] = 'node3'
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('cluster_add')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_cluster_apply: %s' % repr(exc.value))
+ try:
+ remove_node.assert_not_called()
+ except AttributeError:
+ # not supported with python <= 3.4
+ pass
+ assert not exc.value.args[0]['changed']
+
+ def test_remove_node_name_and_id(self):
+        ''' rejecting mutually exclusive cluster_ip_address and node_name '''
+ data = self.set_default_args()
+ # del data['cluster_name']
+ data['cluster_ip_address'] = '10.10.10.10'
+ data['node_name'] = 'node3'
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ print('Info: test_remove_node_name_and_id: %s' % repr(exc.value))
+ msg = 'when state is "absent", parameters are mutually exclusive: cluster_ip_address|node_name'
+ assert msg in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py
new file mode 100644
index 00000000..690563c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py
@@ -0,0 +1,212 @@
+''' unit tests ONTAP Ansible module: na_ontap_cluster_peer '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_peer \
+ import NetAppONTAPClusterPeer as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.data = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'cluster_peer':
+ xml = self.build_cluster_peer_info(self.data)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_cluster_peer_info(parm1):
+        ''' build xml data for cluster-peer-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'cluster-peer-info': {
+ 'cluster-name': parm1['dest_cluster_name'],
+ 'peer-addresses': parm1['dest_intercluster_lifs']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_cluster_peer = {
+ 'source_intercluster_lifs': '1.2.3.4,1.2.3.5',
+ 'dest_intercluster_lifs': '1.2.3.6,1.2.3.7',
+ 'passphrase': 'netapp123',
+ 'dest_hostname': '10.20.30.40',
+ 'dest_cluster_name': 'cluster2',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+
+ }
+
+ def mock_args(self):
+ return {
+ 'source_intercluster_lifs': self.mock_cluster_peer['source_intercluster_lifs'],
+ 'dest_intercluster_lifs': self.mock_cluster_peer['dest_intercluster_lifs'],
+ 'passphrase': self.mock_cluster_peer['passphrase'],
+ 'dest_hostname': self.mock_cluster_peer['dest_hostname'],
+ 'dest_cluster_name': 'cluster2',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def get_cluster_peer_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_cluster_peer object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_cluster_peer object
+ """
+ cluster_peer_obj = my_module()
+ cluster_peer_obj.asup_log_for_cserver = Mock(return_value=None)
+ cluster_peer_obj.cluster = Mock()
+ cluster_peer_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ cluster_peer_obj.server = MockONTAPConnection()
+ cluster_peer_obj.dest_server = MockONTAPConnection()
+ else:
+ cluster_peer_obj.server = MockONTAPConnection(kind=kind, parm1=self.mock_cluster_peer)
+ cluster_peer_obj.dest_server = MockONTAPConnection(kind=kind, parm1=self.mock_cluster_peer)
+ return cluster_peer_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_peer.NetAppONTAPClusterPeer.cluster_peer_get')
+ def test_successful_create(self, cluster_peer_get):
+ ''' Test successful create '''
+ set_module_args(self.mock_args())
+ cluster_peer_get.side_effect = [
+ None,
+ None
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cluster_peer_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_peer.NetAppONTAPClusterPeer.cluster_peer_get')
+ def test_create_idempotency(self, cluster_peer_get):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ current1 = {
+ 'cluster_name': 'cluster1',
+ 'peer-addresses': '1.2.3.6,1.2.3.7'
+ }
+ current2 = {
+ 'cluster_name': 'cluster2',
+ 'peer-addresses': '1.2.3.4,1.2.3.5'
+ }
+ cluster_peer_get.side_effect = [
+ current1,
+ current2
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cluster_peer_mock_object('cluster_peer').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_peer.NetAppONTAPClusterPeer.cluster_peer_get')
+ def test_successful_delete(self, cluster_peer_get):
+ ''' Test delete existing cluster peer '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ data['source_cluster_name'] = 'cluster1'
+ set_module_args(data)
+ current1 = {
+ 'cluster_name': 'cluster1',
+ 'peer-addresses': '1.2.3.6,1.2.3.7'
+ }
+ current2 = {
+ 'cluster_name': 'cluster2',
+ 'peer-addresses': '1.2.3.4,1.2.3.5'
+ }
+ cluster_peer_get.side_effect = [
+ current1,
+ current2
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cluster_peer_mock_object('cluster_peer').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_peer.NetAppONTAPClusterPeer.cluster_peer_get')
+ def test_delete_idempotency(self, cluster_peer_get):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ data['source_cluster_name'] = 'cluster2'
+ set_module_args(data)
+ cluster_peer_get.side_effect = [
+ None,
+ None
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cluster_peer_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py
new file mode 100644
index 00000000..bd13094b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py
@@ -0,0 +1,205 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test for ONTAP Command Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_command \
+ import NetAppONTAPCommand as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ # print(xml.to_string())
+
+ if self.type == 'version':
+ priv = xml.get_child_content('priv')
+ xml = self.build_version(priv, self.parm1)
+
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_version(priv, result):
+ ''' build xml data for version '''
+ prefix = 'NetApp Release'
+ if priv == 'advanced':
+ prefix = '\n' + prefix
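+            # the real ZAPI prepends a newline at advanced privilege; mirrored here so test_advanced_priv can check for it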
+ xml = netapp_utils.zapi.NaElement('results')
+ xml.add_attr('status', 'status_ok')
+ xml.add_new_child('cli-output', prefix)
+ if result == "u'77'":
+ xml.add_new_child('cli-result-value', u'77')
+ elif result == "b'77'":
+ xml.add_new_child('cli-result-value', b'77')
+ else:
+ xml.add_new_child('cli-result-value', b'7' if result is None else result)
+ # print('XML ut:', xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection(kind='version')
+ # whether to use a mock or a simulator
+ self.use_vsim = False
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @staticmethod
+ def set_default_args(vsim=False):
+ ''' populate hostname/username/password '''
+ if vsim:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'admin'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'https': True,
+ 'validate_certs': False
+ })
+
+ def call_command(self, module_args, vsim=False):
+ ''' utility function to call apply '''
+ module_args.update(self.set_default_args(vsim=vsim))
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not vsim:
+ # mock the connection
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ msg = exc.value.args[0]['msg']
+ return msg
+
+ def test_default_priv(self):
+ ''' make sure privilege is not required '''
+ module_args = {
+ 'command': 'version',
+ }
+ msg = self.call_command(module_args, vsim=self.use_vsim)
+ needle = b'<cli-output>NetApp Release'
+ assert needle in msg
+ print('Version (raw): %s' % msg)
+
+ def test_admin_priv(self):
+ ''' make sure admin is accepted '''
+ module_args = {
+ 'command': 'version',
+ 'privilege': 'admin',
+ }
+ msg = self.call_command(module_args, vsim=self.use_vsim)
+ needle = b'<cli-output>NetApp Release'
+ assert needle in msg
+ print('Version (raw): %s' % msg)
+
+ def test_advanced_priv(self):
+ ''' make sure advanced is not required '''
+ module_args = {
+ 'command': 'version',
+ 'privilege': 'advanced',
+ }
+ msg = self.call_command(module_args, vsim=self.use_vsim)
+ # Interestingly, the ZAPI returns a slightly different response
+ needle = b'<cli-output>\nNetApp Release'
+ assert needle in msg
+ print('Version (raw): %s' % msg)
+
+ def get_dict_output(self, result):
+ ''' get result value after calling command module '''
+ print('In:', result)
+ module_args = {
+ 'command': 'version',
+ 'return_dict': 'true',
+ }
+ self.server = MockONTAPConnection(kind='version', parm1=result)
+ dict_output = self.call_command(module_args, vsim=self.use_vsim)
+ print('dict_output: %s' % repr(dict_output))
+ return dict_output['result_value']
+
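+    # the three tests below feed '77' as str, bytes, and the literal "u'77'" through the mock to check that
+    # result_value always comes back as the integer 77, regardless of how cli-result-value was encoded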
+ def test_dict_output_77(self):
+ ''' make sure correct value is returned '''
+ result = '77'
+ assert self.get_dict_output(result) == int(result)
+
+ def test_dict_output_b77(self):
+ ''' make sure correct value is returned '''
+ result = b'77'
+ assert self.get_dict_output(result) == int(result)
+
+ def test_dict_output_u77(self):
+ ''' make sure correct value is returned '''
+ result = "u'77'"
+ assert self.get_dict_output(result) == int(eval(result))
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py
new file mode 100644
index 00000000..b514742b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py
@@ -0,0 +1,321 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_dns'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_dns \
+ import NetAppOntapDns as dns_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required"
+
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ 'dns_record': (200, {"records": [{"domains": ['test.com'],
+ "servers": ['0.0.0.0'],
+ "svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}}]}, None),
+ 'cluster_data': (200, {"dns_domains": ['test.com'],
+ "name_servers": ['0.0.0.0'],
+ "name": "cserver",
+ "uuid": "C2c9e252-41be-11e9-81d5-00a0986138f7"}, None),
+ 'cluster_name': (200, {"name": "cserver"}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ request = xml.to_string().decode('utf-8')
+ if request.startswith("<ems-autosupport-log>"):
+            xml = None  # or something that makes the logger happy, so that @patch is no longer needed
+ # or
+ # xml = build_ems_log_response()
+ elif request == "<net-dns-get/>":
+ if self.kind == 'create':
+ raise netapp_utils.zapi.NaApiError(code="15661")
+ else:
+ xml = self.build_dns_status_info()
+ elif request.startswith("<net-dns-create>"):
+ xml = self.build_dns_status_info()
+ if self.kind == 'enable':
+ xml = self.build_dns_status_info()
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_dns_status_info():
+ xml = netapp_utils.zapi.NaElement('xml')
+ nameservers = [{'ip-address': '0.0.0.0'}]
+ domains = [{'string': 'test.com'}]
+ attributes = {'num-records': 1,
+ 'attributes': {'net-dns-info': {'name-servers': nameservers,
+ 'domains': domains,
+ 'skip-config-validation': 'false'}}}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+    ''' Unit tests for na_ontap_dns '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'state': 'present',
+ 'vserver': 'vserver',
+ 'nameservers': ['0.0.0.0'],
+ 'domains': ['test.com'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_dns_mock_object(self, cx_type='zapi', kind=None, status=None):
+ dns_obj = dns_module()
+ if cx_type == 'zapi':
+ if kind is None:
+ dns_obj.server = MockONTAPConnection()
+ else:
+ dns_obj.server = MockONTAPConnection(kind=kind, data=status)
+ return dns_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ dns_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_idempotent_modify_dns(self):
+ data = self.mock_args()
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object('zapi', 'enable', 'false').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_modify_dns(self):
+ data = self.mock_args()
+ data['domains'] = ['new_test.com']
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object('zapi', 'enable', 'false').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_idempotent_create_dns(self, mock_ems_log_event):
+ data = self.mock_args()
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object('zapi').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_successfully_create_dns(self, mock_ems_log_event):
+ data = self.mock_args()
+ print("create dns")
+ data['domains'] = ['new_test.com']
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object('zapi', 'create').apply()
+ assert exc.value.args[0]['changed']
+
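+    # REST tests: send_request is patched and side_effect yields SRR entries in the order
+    # the module issues REST calls (the is_rest check first, then get/post/patch/delete).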
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_create(self, mock_request):
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['cluster_data'], # get cluster
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_create_is_cluster_vserver(self, mock_request):
+ data = self.mock_args()
+ data['vserver'] = 'cvserver'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['cluster_name'], # get cluster name
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotent_create_dns(self, mock_request):
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['dns_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_destroy(self, mock_request):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['dns_record'], # get
+ SRR['empty_good'], # delete
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotently_destroy(self, mock_request):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['cluster_data'], # get cluster
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_modify(self, mock_request):
+ data = self.mock_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['cluster_data'], # get cluster
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_modify_is_cluster_vserver(self, mock_request):
+ data = self.mock_args()
+ data['vserver'] = 'cvserver'
+ data['state'] = 'present'
+        data['domains'] = ['new_test.com']
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['cluster_data'], # get cluster data
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotently_modify(self, mock_request):
+ data = self.mock_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['dns_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dns_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py
new file mode 100644
index 00000000..9432ff96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py
@@ -0,0 +1,232 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_efficiency_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_efficiency_policy \
+ import NetAppOntapEfficiencyPolicy as efficiency_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'threshold':
+ xml = self.build_threshold_info(self.params)
+ elif self.kind == 'scheduled':
+ xml = self.build_schedule_info(self.params)
+ self.xml_out = xml
+ return xml
+
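+    # The builders below return canned sis-policy-info records, one with policy-type
+    # 'threshold' and one with 'scheduled', as consumed by get_efficiency_policy().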
+ @staticmethod
+ def build_threshold_info(details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'num-records': 1,
+ 'attributes-list': {
+ 'sis-policy-info': {
+ 'changelog-threshold-percent': 10,
+ 'comment': details['comment'],
+ 'enabled': 'true',
+ 'policy-name': details['policy_name'],
+ 'policy-type': 'threshold',
+ 'qos-policy': details['qos_policy'],
+ 'vserver': details['vserver']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_schedule_info(details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'num-records': 1,
+ 'attributes-list': {
+ 'sis-policy-info': {
+ 'comment': details['comment'],
+ 'duration': 10,
+ 'enabled': 'true',
+ 'policy-name': details['policy_name'],
+ 'policy-type': 'scheduled',
+ 'qos-policy': details['qos_policy'],
+ 'vserver': details['vserver']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_efficiency_policy '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_efficiency_policy = {
+ 'state': 'present',
+ 'vserver': 'test_vserver',
+ 'policy_name': 'test_policy',
+ 'comment': 'This policy is for x and y',
+ 'enabled': True,
+ 'qos_policy': 'background'
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_efficiency_policy['state'],
+ 'vserver': self.mock_efficiency_policy['vserver'],
+ 'policy_name': self.mock_efficiency_policy['policy_name'],
+ 'comment': self.mock_efficiency_policy['comment'],
+ 'enabled': self.mock_efficiency_policy['enabled'],
+ 'qos_policy': self.mock_efficiency_policy['qos_policy'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_efficiency_mock_object(self, kind=None):
+ efficiency_obj = efficiency_module()
+ if kind is None:
+ efficiency_obj.server = MockONTAPConnection()
+ elif kind == 'threshold':
+ efficiency_obj.server = MockONTAPConnection(kind='threshold', data=self.mock_efficiency_policy)
+ elif kind == 'scheduled':
+ efficiency_obj.server = MockONTAPConnection(kind='scheduled', data=self.mock_efficiency_policy)
+ return efficiency_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ efficiency_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_efficiency_policy(self):
+ set_module_args(self.mock_args())
+ result = self.get_efficiency_mock_object().get_efficiency_policy()
+ assert not result
+
+ def test_get_existing_scanner(self):
+ set_module_args(self.mock_args())
+ result = self.get_efficiency_mock_object('threshold').get_efficiency_policy()
+ assert result
+
+ def test_successfully_create(self):
+ data = self.mock_args()
+ data['policy_type'] = 'threshold'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_efficiency_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ data = self.mock_args()
+ data['policy_type'] = 'threshold'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_efficiency_mock_object('threshold').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_threshold_duration_failure(self):
+ data = self.mock_args()
+ data['duration'] = 1
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_efficiency_mock_object('threshold').apply()
+ assert exc.value.args[0]['msg'] == "duration cannot be set if policy_type is threshold"
+
+ def test_threshold_schedule_failure(self):
+ data = self.mock_args()
+ data['schedule'] = 'test_job_schedule'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_efficiency_mock_object('threshold').apply()
+ assert exc.value.args[0]['msg'] == "schedule cannot be set if policy_type is threshold"
+
+ def test_scheduled_threshold_percent_failure(self):
+ data = self.mock_args()
+ data['changelog_threshold_percent'] = 30
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_efficiency_mock_object('scheduled').apply()
+ assert exc.value.args[0]['msg'] == "changelog_threshold_percent cannot be set if policy_type is scheduled"
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_efficiency_mock_object('threshold').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_efficiency_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ data = self.mock_args()
+ data['policy_type'] = 'threshold'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_efficiency_mock_object('scheduled').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py
new file mode 100644
index 00000000..04620856
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py
@@ -0,0 +1,289 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_export_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy \
+ import NetAppONTAPExportPolicy as policy_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_uuid_policy_id_export_policy': (
+ 200,
+ {
+ "records": [{
+ "svm": {
+ "uuid": "uuid",
+ "name": "svm"},
+ "id": 123,
+ "name": "ansible"
+ }],
+ "num_records": 1}, None),
+ "no_record": (
+ 200,
+ {"num_records": 0},
+ None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'export_policy':
+ xml = self.build_export_policy_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_export_policy_info(export_policy_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'export-policy-info': {'name': export_policy_details['name']
+ }}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+    ''' Unit tests for na_ontap_export_policy '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_export_policy = {
+ 'name': 'test_policy',
+ 'vserver': 'test_vserver'
+ }
+
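+    # mock_args() returns ZAPI-style arguments with 'use_rest: never'; mock_args(rest=True)
+    # omits use_rest so the REST tests below can exercise the REST code path.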
+ def mock_args(self, rest=False):
+ if rest:
+ return {
+ 'vserver': self.mock_export_policy['vserver'],
+ 'name': self.mock_export_policy['name'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ else:
+ return {
+ 'vserver': self.mock_export_policy['vserver'],
+ 'name': self.mock_export_policy['name'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'use_rest': 'never'
+ }
+
+ def get_export_policy_mock_object(self, cx_type='zapi', kind=None):
+ policy_obj = policy_module()
+ if cx_type == 'zapi':
+ if kind is None:
+ policy_obj.server = MockONTAPConnection()
+ elif kind == 'export_policy':
+ policy_obj.server = MockONTAPConnection(kind='export_policy', data=self.mock_export_policy)
+ return policy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ policy_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy.NetAppONTAPExportPolicy.create_export_policy')
+ def test_successful_create(self, create_export_policy):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_export_policy_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ create_export_policy.assert_called_with(uuid=None)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy.NetAppONTAPExportPolicy.get_export_policy')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy.NetAppONTAPExportPolicy.rename_export_policy')
+ def test_successful_rename(self, rename_export_policy, get_export_policy):
+ ''' Test successful rename '''
+ data = self.mock_args()
+ data['from_name'] = 'old_policy'
+ set_module_args(data)
+ get_export_policy.side_effect = [
+ None,
+ {'policy-name': 'old_policy'}
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_export_policy_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ rename_export_policy.assert_called_with(policy_id=None)
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ '''Test successful rest create'''
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_export_policy_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete(self, mock_request):
+ '''Test successful rest delete'''
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_export_policy_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_rename(self, mock_request):
+ '''Test successful rest rename'''
+ data = self.mock_args(rest=True)
+ data['from_name'] = 'ansible'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['no_record'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_export_policy_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error_create(self, mock_request):
+ '''Test error rest create'''
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['no_record'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_export_policy_mock_object(cx_type='rest').apply()
+ assert 'Error on creating export policy: Expected error' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error_delete(self, mock_request):
+ '''Test error rest delete'''
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_export_policy_mock_object(cx_type='rest').apply()
+ assert 'Error on deleting export policy: Expected error' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error_rename(self, mock_request):
+ '''Test error rest rename'''
+ data = self.mock_args(rest=True)
+ data['from_name'] = 'ansible'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['no_record'],
+ SRR['get_uuid_policy_id_export_policy'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_export_policy_mock_object(cx_type='rest').apply()
+ assert 'Error on renaming export policy: Expected error' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py
new file mode 100644
index 00000000..016dd68a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py
@@ -0,0 +1,269 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_export_policy_rule '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy_rule \
+ import NetAppontapExportRule as policy_rule # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'rule':
+ xml = self.build_policy_rule(self.data)
+ if self.kind == 'rules':
+ xml = self.build_policy_rule(self.data, multiple=True)
+ if self.kind == 'policy':
+ xml = self.build_policy()
+ self.xml_out = xml
+ return xml
+
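+    # build_policy_rule() fakes export-rule-info records; multiple=True reports two records,
+    # which the ambiguous-delete test uses to trigger the multiple-rules failure.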
+ @staticmethod
+ def build_policy_rule(policy, multiple=False):
+ ''' build xml data for export-rule-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'attributes-list': {
+ 'export-rule-info': {
+ 'policy-name': policy['name'],
+ 'client-match': policy['client_match'],
+ 'ro-rule': {
+ 'security-flavor': 'any'
+ },
+ 'rw-rule': {
+ 'security-flavor': 'any'
+ },
+ 'protocol': {
+ 'access-protocol': policy['protocol']
+ },
+ 'super-user-security': {
+ 'security-flavor': 'any'
+ },
+ 'is-allow-set-uid-enabled': 'false',
+ 'rule-index': policy['rule_index'],
+ 'anonymous-user-id': policy['anonymous_user_id'],
+
+ }
+ }, 'num-records': 2 if multiple is True else 1}
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_policy():
+ ''' build xml data for export-policy-get-iter '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_rule = {
+ 'name': 'test',
+ 'protocol': 'nfs',
+ 'client_match': '1.1.1.0',
+ 'rule_index': 10,
+ 'anonymous_user_id': '65534'
+ }
+
+ def mock_rule_args(self):
+ return {
+ 'name': self.mock_rule['name'],
+ 'client_match': self.mock_rule['client_match'],
+ 'vserver': 'test',
+ 'protocol': self.mock_rule['protocol'],
+ 'rule_index': self.mock_rule['rule_index'],
+ 'anonymous_user_id': self.mock_rule['anonymous_user_id'],
+ 'ro_rule': 'any',
+ 'rw_rule': 'any',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_firewall_policy object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_firewall_policy object
+ """
+ obj = policy_rule()
+ obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ obj.server = MockONTAPConnection(kind=kind, data=self.mock_rule_args())
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ policy_rule()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_rule(self):
+ ''' Test if get_export_policy_rule returns None for non-existent policy '''
+ set_module_args(self.mock_rule_args())
+ result = self.get_mock_object().get_export_policy_rule()
+ assert result is None
+
+ def test_get_nonexistent_policy(self):
+ ''' Test if get_export_policy returns None for non-existent policy '''
+ set_module_args(self.mock_rule_args())
+ result = self.get_mock_object().get_export_policy()
+ assert result is None
+
+ def test_get_existing_rule(self):
+ ''' Test if get_export_policy_rule returns rule details for existing policy '''
+ data = self.mock_rule_args()
+ set_module_args(data)
+ result = self.get_mock_object('rule').get_export_policy_rule()
+ assert result['name'] == data['name']
+ assert result['client_match'] == data['client_match']
+        assert result['ro_rule'] == ['any']  # from build_policy_rule()
+
+ def test_get_existing_policy(self):
+ ''' Test if get_export_policy returns policy details for existing policy '''
+ data = self.mock_rule_args()
+ set_module_args(data)
+ result = self.get_mock_object('policy').get_export_policy()
+ assert result is not None
+
+ def test_create_missing_param_error(self):
+ ''' Test validation error from create '''
+ data = self.mock_rule_args()
+ del data['ro_rule']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_mock_object().apply()
+ msg = 'Error: Missing required param for creating export policy rule ro_rule'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ set_module_args(self.mock_rule_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_rule_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('rule').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete_without_rule_index(self):
+ ''' Test delete existing job '''
+ data = self.mock_rule_args()
+ data['state'] = 'absent'
+ del data['rule_index']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('rule').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_rule_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ ''' Test successful modify protocol '''
+ data = self.mock_rule_args()
+ data['protocol'] = ['cifs']
+ data['allow_suid'] = 'true'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('rule').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error_on_ambiguous_delete(self):
+ ''' Test error if multiple entries match for a delete '''
+ data = self.mock_rule_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_mock_object('rules').apply()
+ msg = "Multiple export policy rules exist.Please specify a rule_index to delete"
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_helper_query_parameters(self):
+ ''' Test helper method set_query_parameters() '''
+ data = self.mock_rule_args()
+ set_module_args(data)
+ result = self.get_mock_object('rule').set_query_parameters()
+ print(str(result))
+ assert 'query' in result
+ assert 'export-rule-info' in result['query']
+ assert result['query']['export-rule-info']['rule-index'] == data['rule_index']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py
new file mode 100644
index 00000000..70354edc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py
@@ -0,0 +1,173 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_file_directory_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_file_directory_policy \
+ import NetAppOntapFilePolicy as policy_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ request = xml.to_string().decode('utf-8')
+ if self.kind == 'error':
+ raise netapp_utils.zapi.NaApiError('test', 'expect error')
+ elif request.startswith("<ems-autosupport-log>"):
+            xml = None  # or something that makes the logger happy, so that @patch is no longer needed
+ # or
+ # xml = build_ems_log_response()
+ elif request.startswith("<file-directory-security-policy-get-iter>"):
+ if self.kind == 'create':
+ xml = self.build_sd_info()
+ else:
+ xml = self.build_sd_info(self.params)
+ elif request.startswith("<file-directory-security-ntfs-modify>"):
+ xml = self.build_sd_info(self.params)
+ self.xml_out = xml
+ return xml
+
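+    # kind='error' makes every ZAPI call raise NaApiError('test', 'expect error'); the error
+    # tests assert that each module method wraps it in its own failure message.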
+ @staticmethod
+ def build_sd_info(data=None):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {}
+ if data is not None:
+ attributes = {'num-records': 1,
+ 'attributes-list': {'file-directory-security-policy': {'policy-name': data['policy_name']}}}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_file_directory_policy '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+    def get_policy_mock_object(self, cx_type='zapi', kind=None, status=None):
+        policy_obj = policy_module()
+        netapp_utils.ems_log_event = Mock(return_value=None)
+        if cx_type == 'zapi':
+ if kind is None:
+ policy_obj.server = MockONTAPConnection()
+ else:
+ policy_obj.server = MockONTAPConnection(kind=kind, data=status)
+ return policy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ policy_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_successfully_create_policy(self):
+ data = self.mock_args()
+ data['policy_name'] = 'test_policy'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error(self):
+ data = self.mock_args()
+ data['policy_name'] = 'test_policy'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).get_policy_iter()
+ assert exc.value.args[0]['msg'] == 'Error fetching file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).create_policy()
+ assert exc.value.args[0]['msg'] == 'Error creating file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).remove_policy()
+ assert exc.value.args[0]['msg'] == 'Error removing file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ data['path'] = '/vol'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).get_task_iter()
+ assert exc.value.args[0]['msg'] == 'Error fetching task from file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).add_task_to_policy()
+ assert exc.value.args[0]['msg'] == 'Error adding task to file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).remove_task_from_policy()
+ assert exc.value.args[0]['msg'] == 'Error removing task from file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).modify_task(dict())
+ assert exc.value.args[0]['msg'] == 'Error modifying task in file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_mock_object('zapi', 'error', data).set_sd()
+ assert exc.value.args[0]['msg'] == 'Error applying file-directory policy test_policy: NetApp API failed. Reason - test:expect error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py
new file mode 100644
index 00000000..51e4d06d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py
@@ -0,0 +1,296 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_firewall_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_firewall_policy \
+ import NetAppONTAPFirewallPolicy as fp_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'policy':
+ xml = self.build_policy_info(self.data)
+ if self.kind == 'config':
+ xml = self.build_firewall_config_info(self.data)
+ self.xml_out = xml
+ return xml
+
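+    # Canned ZAPI replies: build_policy_info() returns one net-firewall-policy-info record
+    # and build_firewall_config_info() returns a node config with firewall enabled and
+    # logging disabled.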
+ @staticmethod
+ def build_policy_info(data):
+ ''' build xml data for net-firewall-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'net-firewall-policy-info': {
+ 'policy': data['policy'],
+ 'service': data['service'],
+ 'allow-list': [
+ {'ip-and-mask': '1.2.3.0/24'}
+ ]
+ }
+ }
+ }
+
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_firewall_config_info(data):
+ ''' build xml data for net-firewall-config-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes': {
+ 'net-firewall-config-info': {
+ 'is-enabled': 'true',
+ 'is-logging': 'false'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_policy = {
+ 'policy': 'test',
+ 'service': 'http',
+ 'vserver': 'my_vserver',
+ 'allow_list': '1.2.3.0/24'
+ }
+ self.mock_config = {
+ 'node': 'test',
+ 'enable': 'enable',
+ 'logging': 'enable'
+ }
+
+ def mock_policy_args(self):
+ return {
+ 'policy': self.mock_policy['policy'],
+ 'service': self.mock_policy['service'],
+ 'vserver': self.mock_policy['vserver'],
+ 'allow_list': [self.mock_policy['allow_list']],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def mock_config_args(self):
+ return {
+ 'node': self.mock_config['node'],
+ 'enable': self.mock_config['enable'],
+ 'logging': self.mock_config['logging'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_firewall_policy object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_firewall_policy object
+ """
+ obj = fp_module()
+ obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ mock_data = self.mock_config if kind == 'config' else self.mock_policy
+ obj.server = MockONTAPConnection(kind=kind, data=mock_data)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ fp_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_helper_firewall_policy_attributes(self):
+ ''' helper returns dictionary with vserver, service and policy details '''
+ data = self.mock_policy
+ set_module_args(self.mock_policy_args())
+ result = self.get_mock_object('policy').firewall_policy_attributes()
+ del data['allow_list']
+ assert data == result
+
+ def test_helper_validate_ip_addresses_positive(self):
+ ''' test if helper validates if IP is a network address '''
+ data = self.mock_policy_args()
+ data['allow_list'] = ['1.2.0.0/16', '1.2.3.0/24']
+ set_module_args(data)
+ result = self.get_mock_object().validate_ip_addresses()
+ assert result is None
+
+ def test_helper_validate_ip_addresses_negative(self):
+ ''' test if helper validates if IP is a network address '''
+ data = self.mock_policy_args()
+ data['allow_list'] = ['1.2.0.10/16', '1.2.3.0/24']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_mock_object().validate_ip_addresses()
+ msg = 'Error: Invalid IP address value for allow_list parameter.' \
+ 'Please specify a network address without host bits set: ' \
+ '1.2.0.10/16 has host bits set'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_get_nonexistent_policy(self):
+ ''' Test if get_firewall_policy returns None for non-existent policy '''
+ set_module_args(self.mock_policy_args())
+ result = self.get_mock_object().get_firewall_policy()
+ assert result is None
+
+ def test_get_existing_policy(self):
+ ''' Test if get_firewall_policy returns policy details for existing policy '''
+ data = self.mock_policy_args()
+ set_module_args(data)
+ result = self.get_mock_object('policy').get_firewall_policy()
+ assert result['service'] == data['service']
+ assert result['allow_list'] == ['1.2.3.0/24'] # from build_policy_info()
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ set_module_args(self.mock_policy_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_policy_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('policy').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete(self):
+ ''' Test delete existing job '''
+ data = self.mock_policy_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_policy_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ ''' Test successful modify allow_list '''
+ data = self.mock_policy_args()
+ data['allow_list'] = ['1.2.0.0/16']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+    def test_successful_modify_multiple_ips(self):
+ ''' Test successful modify allow_list '''
+ data = self.mock_policy_args()
+ data['allow_list'] = ['1.2.0.0/16', '1.0.0.0/8']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+    def test_successful_modify_multiple_ips_contain_existing(self):
+ ''' Test successful modify allow_list '''
+ data = self.mock_policy_args()
+ data['allow_list'] = ['1.2.3.0/24', '1.0.0.0/8']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_get_nonexistent_config(self):
+ ''' Test if get_firewall_config returns None for non-existent node '''
+ set_module_args(self.mock_config_args())
+ result = self.get_mock_object().get_firewall_config_for_node()
+ assert result is None
+
+ def test_get_existing_config(self):
+ ''' Test if get_firewall_config returns policy details for existing node '''
+ data = self.mock_config_args()
+ set_module_args(data)
+ result = self.get_mock_object('config').get_firewall_config_for_node()
+        assert result['enable'] == 'enable'  # from build_firewall_config_info()
+        assert result['logging'] == 'disable'  # from build_firewall_config_info()
+
+ def test_successful_modify_config(self):
+ ''' Test successful modify allow_list '''
+ data = self.mock_config_args()
+ data['enable'] = 'disable'
+ data['logging'] = 'enable'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_mock_object('config').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py
new file mode 100644
index 00000000..223bb5b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py
@@ -0,0 +1,436 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_firmware_upgrade '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade\
+ import NetAppONTAPFirmwareUpgrade as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, parm2=None, parm3=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.parm2 = parm2
+ # self.parm3 = parm3
+ self.xml_in = None
+ self.xml_out = None
+ self.firmware_type = 'None'
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ # print('xml_in', xml.to_string())
+ if self.type == 'firmware_upgrade':
+ xml = self.build_firmware_upgrade_info(self.parm1, self.parm2)
+ if self.type == 'acp':
+ xml = self.build_acp_firmware_info(self.firmware_type)
+ if self.type == 'firmware_download':
+ xml = self.build_system_cli_info(error=self.parm1)
+ if self.type == 'firmware_download_exception':
+ raise netapp_utils.zapi.NaApiError(self.parm1, self.parm2)
+ self.xml_out = xml
+ return xml
+
+ def autosupport_log(self):
+ ''' mock autosupport log'''
+ return None
+
+ @staticmethod
+ def build_firmware_upgrade_info(version, node):
+ ''' build xml data for service-processor firmware info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {
+ 'num-records': 1,
+ 'attributes-list': {'service-processor-info': {'firmware-version': '3.4'}}
+ }
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_acp_firmware_info(firmware_type):
+ ''' build xml data for acp firmware info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {
+ # 'num-records': 1,
+ 'attributes-list': {'storage-shelf-acp-module': {'state': 'firmware_update_required'}}
+ }
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_system_cli_info(error=None):
+ ''' build xml data for system-cli info '''
+ if error is None:
+ # make it a string, to be able to compare easily
+ error = ""
+ xml = netapp_utils.zapi.NaElement('results')
+ if error == 'empty_output':
+ output = ""
+ else:
+ output = 'Download complete.'
+ data = {
+ 'cli-output': output,
+ 'cli-result-value': 1
+ }
+ xml.translate_struct(data)
+ if error == 'status_failed':
+ status = "failed"
+ else:
+ status = "passed"
+ if error != 'no_status_attr':
+ xml.add_attr('status', status)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.use_vsim = False
+
+ def set_default_args(self):
+ if self.use_vsim:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'admin'
+ node = 'vsim1'
+ clear_logs = True
+ package = 'test1.zip'
+ install_baseline_image = False
+ update_type = 'serial_full'
+ force_disruptive_update = False
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ node = 'abc'
+ package = 'test1.zip'
+ clear_logs = True
+ install_baseline_image = False
+ update_type = 'serial_full'
+ force_disruptive_update = False
+
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'node': node,
+ 'package': package,
+ 'clear_logs': clear_logs,
+ 'install_baseline_image': install_baseline_image,
+ 'update_type': update_type,
+ 'https': 'true',
+ 'force_disruptive_update': force_disruptive_update
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_invalid_firmware_type_parameters(self):
+ ''' fail if firmware_type value is invalid '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'service_test'})
+ set_module_args(module_args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(module_args)
+ my_module()
+ msg = 'value of firmware_type must be one of: service-processor, shelf, acp, disk, got: %s' % module_args['firmware_type']
+ print('Info: %s' % exc.value.args[0]['msg'])
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_ensure_sp_firmware_get_called(self):
+ ''' verify firmware_image_get() returns None with the default mock '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'service-processor'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.server = self.server
+ firmware_image_get = my_obj.firmware_image_get('node')
+ print('Info: test_firmware_upgrade_get: %s' % repr(firmware_image_get))
+ assert firmware_image_get is None
+
+ def test_ensure_firmware_get_with_package_baseline_called(self):
+ ''' verify package and install_baseline_image cannot both be specified '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'service-processor'})
+ module_args.update({'package': 'test1.zip'})
+ module_args.update({'install_baseline_image': True})
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(module_args)
+ my_module()
+ msg = 'Do not specify both package and install_baseline_image: true'
+ print('info: ' + exc.value.args[0]['msg'])
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_ensure_acp_firmware_required_get_called(self):
+ ''' a test to verify whether acp firmware upgrade is required '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'acp'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ # my_obj.server = self.server
+ my_obj.server = MockONTAPConnection(kind='acp')
+ acp_firmware_required_get = my_obj.acp_firmware_required_get()
+ print('Info: test_acp_firmware_upgrade_required_get: %s' % repr(acp_firmware_required_get))
+ assert acp_firmware_required_get is True
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.sp_firmware_image_update')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.sp_firmware_image_update_progress_get')
+ def test_ensure_apply_for_firmware_upgrade_called(self, get_mock, update_mock):
+ ''' upgrading firmware and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package': 'test1.zip'})
+ module_args.update({'firmware_type': 'service-processor'})
+ module_args.update({'force_disruptive_update': True})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_upgrade', '3.5', 'true')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ update_mock.assert_called_with()
+
+ def test_shelf_firmware_upgrade(self):
+ ''' Test shelf firmware upgrade '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'shelf'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.acp_firmware_upgrade')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.acp_firmware_required_get')
+ def test_acp_firmware_upgrade(self, get_mock, update_mock):
+ ''' Test ACP firmware upgrade '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'acp'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.disk_firmware_upgrade')
+ def test_disk_firmware_upgrade(self, get_mock):
+ ''' Test disk firmware upgrade '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'firmware_type': 'disk'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ def test_firmware_download(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ msg = "Firmware download completed. Extra info: Download complete."
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_firmware_download_60(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download_exception', 60, 'ZAPI timeout')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ msg = "Firmware download completed, slowly."
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_firmware_download_502(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download_exception', 502, 'Bad GW')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ msg = "Firmware download still in progress."
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_firmware_download_502_as_error(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ module_args.update({'fail_on_502_error': True})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download_exception', 502, 'Bad GW')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "NetApp API failed. Reason - 502:Bad GW"
+ assert msg in exc.value.args[0]['msg']
+
+ def test_firmware_download_no_num_error(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download_exception', 'some error string', 'whatever')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "NetApp API failed. Reason - some error string:whatever"
+ assert msg in exc.value.args[0]['msg']
+
+ def test_firmware_download_no_status_attr(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download', 'no_status_attr')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "unable to download package from dummy_url: 'status' attribute missing."
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ def test_firmware_download_status_failed(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download', 'status_failed')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "unable to download package from dummy_url: check 'status' value."
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ def test_firmware_download_empty_output(self):
+ ''' Test firmware download '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_url': 'dummy_url'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('firmware_download', 'empty_output')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "unable to download package from dummy_url: check console permissions."
+ assert exc.value.args[0]['msg'].startswith(msg)
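
The tests above rely on two pieces of scaffolding: module arguments are injected by serializing them into basic._ANSIBLE_ARGS, and AnsibleModule.exit_json/fail_json are patched so they raise exceptions instead of calling sys.exit(). A reduced, standalone sketch of that pattern, using unittest.mock directly instead of the collection's compat shim (the 'name' argument is purely illustrative, and it assumes ansible-core is importable):

import json
from unittest.mock import patch

from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes


class AnsibleExitJson(Exception):
    """raised instead of sys.exit() so the test can inspect the module result"""


def exit_json(*args, **kwargs):  # stands in for AnsibleModule.exit_json while patched
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def demo():
    # inject module arguments the same way set_module_args() does above
    basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'name': 'example'}}))
    with patch.multiple(basic.AnsibleModule, exit_json=exit_json):
        module = basic.AnsibleModule(argument_spec=dict(name=dict(type='str')))
        try:
            module.exit_json(changed=True, msg='done')
        except AnsibleExitJson as exc:
            assert exc.args[0]['changed'] is True


if __name__ == '__main__':
    demo()
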
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py
new file mode 100644
index 00000000..ae93f142
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py
@@ -0,0 +1,531 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test for ONTAP FlexCache Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_flexcache \
+ import NetAppONTAPFlexCache as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, api_error=None, job_error=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.api_error = api_error
+ self.job_error = job_error
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ tag = xml.get_name()
+ if tag == 'flexcache-get-iter' and self.type == 'vserver':
+ xml = self.build_flexcache_info(self.parm1)
+ elif tag == 'flexcache-create-async':
+ xml = self.build_flexcache_create_destroy_rsp()
+ elif tag == 'flexcache-destroy-async':
+ if self.api_error:
+ code, message = self.api_error.split(':', 2)
+ raise netapp_utils.zapi.NaApiError(code, message)
+ xml = self.build_flexcache_create_destroy_rsp()
+ elif tag == 'job-get':
+ xml = self.build_job_info(self.job_error)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_flexcache_info(vserver):
+ ''' build xml data for flexcache-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes-list')
+ count = 2 if vserver == 'repeats' else 1
+ for dummy in range(count):
+ attributes.add_node_with_children('flexcache-info', **{
+ 'vserver': vserver,
+ 'origin-vserver': 'ovserver',
+ 'origin-volume': 'ovolume',
+ 'origin-cluster': 'ocluster',
+ 'volume': 'volume',
+ })
+ xml.add_child_elem(attributes)
+ xml.add_new_child('num-records', str(count))
+ return xml
+
+ @staticmethod
+ def build_flexcache_create_destroy_rsp():
+ ''' build xml data for a create or destroy response '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ xml.add_new_child('result-status', 'in_progress')
+ xml.add_new_child('result-jobid', '1234')
+ return xml
+
+ @staticmethod
+ def build_job_info(error):
+ ''' build xml data for a job '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes')
+ if error is None:
+ state = 'success'
+ elif error == 'time_out':
+ state = 'running'
+ else:
+ state = 'failure'
+ attributes.add_node_with_children('job-info', **{
+ 'job-state': state,
+ 'job-progress': 'dummy',
+ 'job-completion': error,
+ })
+ xml.add_child_elem(attributes)
+ xml.add_new_child('result-status', 'in_progress')
+ xml.add_new_child('result-jobid', '1234')
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ # onbox can be set to True to run against a live system; make sure it is False before submitting
+ self.onbox = False
+ self.dummy_args = dict()
+ for arg in ('hostname', 'username', 'password'):
+ self.dummy_args[arg] = arg
+ if self.onbox:
+ self.args = {
+ 'hostname': '10.193.78.219',
+ 'username': 'admin',
+ 'password': 'netapp1!',
+ }
+ else:
+ self.args = self.dummy_args
+ self.server = MockONTAPConnection()
+
+ def create_flexcache(self, vserver, volume, junction_path):
+ ''' create flexcache '''
+ if not self.onbox:
+ return
+ args = {
+ 'state': 'present',
+ 'volume': volume,
+ 'size': '90', # 80MB minimum
+ 'size_unit': 'mb', # 80MB minimum
+ 'vserver': vserver,
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ 'junction_path': junction_path,
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ try:
+ my_obj.apply()
+ except AnsibleExitJson as exc:
+ print('Create util: ' + repr(exc))
+ except AnsibleFailJson as exc:
+ print('Create util: ' + repr(exc))
+
+ def delete_flexcache(self, vserver, volume):
+ ''' delete flexcache '''
+ if not self.onbox:
+ return
+ args = {'volume': volume, 'vserver': vserver, 'state': 'absent', 'force_offline': 'true'}
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ try:
+ my_obj.apply()
+ except AnsibleExitJson as exc:
+ print('Delete util: ' + repr(exc))
+ except AnsibleFailJson as exc:
+ print('Delete util: ' + repr(exc))
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_missing_parameters(self):
+ ''' fail if origin_volume and origin_vserver are missing '''
+ args = {
+ 'vserver': 'vserver',
+ 'volume': 'volume'
+ }
+ args.update(self.dummy_args)
+ set_module_args(args)
+ my_obj = my_module()
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # Hint: start with get methods, as they are called first
+ my_obj.apply()
+ msg = 'Missing parameters: origin_volume, origin_vserver'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_missing_parameter(self):
+ ''' fail if origin_vserver parameter is missing '''
+ args = {
+ 'vserver': 'vserver',
+ 'origin_volume': 'origin_volume',
+ 'volume': 'volume'
+ }
+ args.update(self.dummy_args)
+ set_module_args(args)
+ my_obj = my_module()
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = 'Missing parameter: origin_vserver'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_get_flexcache(self):
+ ''' get flexcache info '''
+ args = {
+ 'vserver': 'ansibleSVM',
+ 'origin_volume': 'origin_volume',
+ 'volume': 'volume'
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver')
+ info = my_obj.flexcache_get()
+ print('info: ' + repr(info))
+
+ def test_get_flexcache_double(self):
+ ''' get flexcache info returns 2 entries! '''
+ args = {
+ 'vserver': 'ansibleSVM',
+ 'origin_volume': 'origin_volume',
+ 'volume': 'volume'
+ }
+ args.update(self.dummy_args)
+ set_module_args(args)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection('vserver', 'repeats')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.flexcache_get()
+ msg = 'Error fetching FlexCache info: Multiple records found for %s:' % args['volume']
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_create_flexcache(self):
+ ''' create flexcache '''
+ args = {
+ 'volume': 'volume',
+ 'size': '90', # 80MB minimum
+ 'size_unit': 'mb', # 80MB minimum
+ 'vserver': 'ansibleSVM',
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ }
+ self.delete_flexcache(args['vserver'], args['volume'])
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection()
+ with patch.object(my_module, 'flexcache_create', wraps=my_obj.flexcache_create) as mock_create:
+ # with patch('__main__.my_module.flexcache_create', wraps=my_obj.flexcache_create) as mock_create:
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ assert exc.value.args[0]['changed']
+ mock_create.assert_called_with()
+
+ def test_create_flexcache_idempotent(self):
+ ''' create flexcache - already exists '''
+ args = {
+ 'volume': 'volume',
+ 'vserver': 'ansibleSVM',
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ assert exc.value.args[0]['changed'] is False
+
+ def test_create_flexcache_autoprovision(self):
+ ''' create flexcache with autoprovision'''
+ args = {
+ 'volume': 'volume',
+ 'size': '90', # 80MB minimum
+ 'size_unit': 'mb', # 80MB minimum
+ 'vserver': 'ansibleSVM',
+ 'auto_provision_as': 'flexgroup',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ }
+ self.delete_flexcache(args['vserver'], args['volume'])
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection()
+ with patch.object(my_module, 'flexcache_create', wraps=my_obj.flexcache_create) as mock_create:
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ assert exc.value.args[0]['changed']
+ mock_create.assert_called_with()
+
+ def test_create_flexcache_autoprovision_idempotent(self):
+ ''' create flexcache with autoprovision - already exists '''
+ args = {
+ 'volume': 'volume',
+ 'vserver': 'ansibleSVM',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ 'auto_provision_as': 'flexgroup',
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ assert exc.value.args[0]['changed'] is False
+
+ def test_create_flexcache_multiplier(self):
+ ''' create flexcache with aggregate multiplier'''
+ args = {
+ 'volume': 'volume',
+ 'size': '90', # 80MB minimum
+ 'size_unit': 'mb', # 80MB minimum
+ 'vserver': 'ansibleSVM',
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ 'aggr_list_multiplier': '2',
+ }
+ self.delete_flexcache(args['vserver'], args['volume'])
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection()
+ with patch.object(my_module, 'flexcache_create', wraps=my_obj.flexcache_create) as mock_create:
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ assert exc.value.args[0]['changed']
+ mock_create.assert_called_with()
+
+ def test_create_flexcache_multiplier_idempotent(self):
+ ''' create flexcache with aggregate multiplier - already exists '''
+ args = {
+ 'volume': 'volume',
+ 'vserver': 'ansibleSVM',
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ 'aggr_list_multiplier': '2',
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ assert exc.value.args[0]['changed'] is False
+
+ def test_delete_flexcache_exists_no_force(self):
+ ''' delete flexcache '''
+ args = {'volume': 'volume', 'vserver': 'ansibleSVM', 'state': 'absent'}
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ error = '13001:Volume volume in Vserver ansibleSVM must be offline to be deleted. ' \
+ 'Use "volume offline -vserver ansibleSVM -volume volume" command to offline ' \
+ 'the volume'
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver', 'flex', api_error=error)
+ with patch.object(my_module, 'flexcache_delete', wraps=my_obj.flexcache_delete) as mock_delete:
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Delete: ' + repr(exc.value))
+ msg = 'Error deleting FlexCache : NetApp API failed. Reason - %s' % error
+ assert exc.value.args[0]['msg'] == msg
+ mock_delete.assert_called_with()
+
+ def test_delete_flexcache_exists_with_force(self):
+ ''' delete flexcache '''
+ args = {'volume': 'volume', 'vserver': 'ansibleSVM', 'state': 'absent', 'force_offline': 'true'}
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver', 'flex')
+ with patch.object(my_module, 'flexcache_delete', wraps=my_obj.flexcache_delete) as mock_delete:
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Delete: ' + repr(exc.value))
+ assert exc.value.args[0]['changed']
+ mock_delete.assert_called_with()
+
+ def test_delete_flexcache_exists_junctionpath_no_force(self):
+ ''' delete flexcache '''
+ args = {'volume': 'volume', 'vserver': 'ansibleSVM', 'junction_path': 'jpath', 'state': 'absent', 'force_offline': 'true'}
+ self.create_flexcache(args['vserver'], args['volume'], args['junction_path'])
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ error = '160:Volume volume on Vserver ansibleSVM must be unmounted before being taken offline or restricted.'
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver', 'flex', api_error=error)
+ with patch.object(my_module, 'flexcache_delete', wraps=my_obj.flexcache_delete) as mock_delete:
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Delete: ' + repr(exc.value))
+ msg = 'Error deleting FlexCache : NetApp API failed. Reason - %s' % error
+ assert exc.value.args[0]['msg'] == msg
+ mock_delete.assert_called_with()
+
+ def test_delete_flexcache_exists_junctionpath_with_force(self):
+ ''' delete flexcache '''
+ args = {'volume': 'volume', 'vserver': 'ansibleSVM', 'junction_path': 'jpath', 'state': 'absent', 'force_offline': 'true', 'force_unmount': 'true'}
+ self.create_flexcache(args['vserver'], args['volume'], args['junction_path'])
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('vserver', 'flex')
+ with patch.object(my_module, 'flexcache_delete', wraps=my_obj.flexcache_delete) as mock_delete:
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Delete: ' + repr(exc.value))
+ assert exc.value.args[0]['changed']
+ mock_delete.assert_called_with()
+
+ def test_delete_flexcache_not_exist(self):
+ ''' delete flexcache '''
+ args = {'volume': 'volume', 'vserver': 'ansibleSVM', 'state': 'absent'}
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Delete: ' + repr(exc.value))
+ assert exc.value.args[0]['changed'] is False
+
+ def test_create_flexcache_size_error(self):
+ ''' create flexcache '''
+ args = {
+ 'volume': 'volume_err',
+ 'size': '50', # 80MB minimum
+ 'size_unit': 'mb', # 80MB minimum
+ 'vserver': 'ansibleSVM',
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ error = 'Size "50MB" ("52428800B") is too small. Minimum size is "80MB" ("83886080B"). '
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection(job_error=error)
+ with patch.object(my_module, 'flexcache_create', wraps=my_obj.flexcache_create) as mock_create:
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ msg = 'Error when creating flexcache: %s' % error
+ assert exc.value.args[0]['msg'] == msg
+ mock_create.assert_called_with()
+
+ def test_create_flexcache_time_out(self):
+ ''' create flexcache '''
+ args = {
+ 'volume': 'volume_err',
+ 'size': '50', # 80MB minimum
+ 'size_unit': 'mb', # 80MB minimum
+ 'vserver': 'ansibleSVM',
+ 'aggr_list': 'aggr1',
+ 'origin_volume': 'fc_vol_origin',
+ 'origin_vserver': 'ansibleSVM',
+ 'time_out': '2'
+ }
+ args.update(self.args)
+ set_module_args(args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection(job_error='time_out')
+ with patch.object(my_module, 'flexcache_create', wraps=my_obj.flexcache_create) as mock_create:
+ # replace time.sleep with a noop
+ with patch('time.sleep', lambda a: None):
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print('Create: ' + repr(exc.value))
+ msg = 'Error when creating flexcache: job completion exceeded expected timer of: %s seconds' \
+ % args['time_out']
+ assert exc.value.args[0]['msg'] == msg
+ mock_create.assert_called_with()
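
A recurring idiom in the FlexCache tests above is patch.object(..., wraps=...), which lets a test assert that apply() dispatched into flexcache_create()/flexcache_delete() while still running the real method. A minimal sketch of that idiom on a hypothetical class (not the module's API):

from unittest.mock import patch


class Service:
    ''' hypothetical stand-in for the module class '''
    def create(self):
        return 'created'

    def apply(self):
        return self.create()


svc = Service()
# wraps= keeps the original behaviour; the mock only records the call
with patch.object(Service, 'create', wraps=svc.create) as mock_create:
    result = svc.apply()
mock_create.assert_called_with()
assert result == 'created'  # the wrapped original still ran
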
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py
new file mode 100644
index 00000000..57609fc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py
@@ -0,0 +1,260 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_igroup '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup \
+ import NetAppOntapIgroup as igroup # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'igroup':
+ xml = self.build_igroup()
+ if self.kind == 'igroup_no_initiators':
+ xml = self.build_igroup_no_initiators()
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_igroup():
+ ''' build xml data for an igroup with two initiators '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'initiator-group-info': {
+ 'initiators': [
+ {
+ 'initiator-info': {
+ 'initiator-name': 'init1'
+ }},
+ {
+ 'initiator-info': {
+ 'initiator-name': 'init2'
+ }}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_igroup_no_initiators():
+ ''' build xml data for igroup with no initiators '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'initiator-group-info': {
+ 'vserver': 'test'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'name': 'test',
+ 'initiators': 'init1',
+ 'ostype': 'linux',
+ 'initiator_group_type': 'fcp',
+ 'bind_portset': 'true',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password'
+ }
+
+ def get_igroup_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_igroup object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_igroup object
+ """
+ obj = igroup()
+ obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ obj.server = MockONTAPConnection(kind=kind)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ igroup()
+
+ def test_get_nonexistent_igroup(self):
+ ''' Test if get_igroup returns None for non-existent igroup '''
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_igroup_mock_object().get_igroup('dummy')
+ assert result is None
+
+ def test_get_existing_igroup_with_initiators(self):
+ ''' Test if get_igroup returns list of existing initiators '''
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_igroup_mock_object('igroup').get_igroup(data['name'])
+ assert data['initiators'] in result['initiators']
+ assert result['initiators'] == ['init1', 'init2']
+
+ def test_get_existing_igroup_without_initiators(self):
+ ''' Test if get_igroup returns empty list() '''
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_igroup_mock_object('igroup_no_initiators').get_igroup(data['name'])
+ assert result['initiators'] == []
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup.NetAppOntapIgroup.add_initiators')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup.NetAppOntapIgroup.remove_initiators')
+ def test_modify_initiator_calls_add_and_remove(self, remove, add):
+ '''Test remove_initiator() is called followed by add_initiator() on modify operation'''
+ data = self.mock_args()
+ data['initiators'] = 'replacewithme'
+ set_module_args(data)
+ obj = self.get_igroup_mock_object('igroup')
+ with pytest.raises(AnsibleExitJson) as exc:
+ current = obj.get_igroup(data['name'])
+ obj.apply()
+ remove.assert_called_with(current['initiators'])
+ add.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup.NetAppOntapIgroup.modify_initiator')
+ def test_modify_called_from_add(self, modify):
+ '''Test remove_initiator() and add_initiator() calls modify'''
+ data = self.mock_args()
+ data['initiators'] = 'replacewithme'
+ add, remove = 'igroup-add', 'igroup-remove'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_igroup_mock_object('igroup_no_initiators').apply()
+ modify.assert_called_with('replacewithme', add)
+ assert modify.call_count == 1 # remove nothing, add 1 new
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup.NetAppOntapIgroup.modify_initiator')
+ def test_modify_called_from_remove(self, modify):
+ '''Test remove_initiator() and add_initiator() calls modify'''
+ data = self.mock_args()
+ data['initiators'] = ''
+ remove = 'igroup-remove'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_igroup_mock_object('igroup').apply()
+ modify.assert_called_with('init2', remove)
+ assert modify.call_count == 2 # remove existing 2, add nothing
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup.NetAppOntapIgroup.add_initiators')
+ def test_successful_create(self, add):
+ ''' Test successful create '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_igroup_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ add.assert_called_with()
+
+ def test_successful_delete(self):
+ ''' Test successful delete '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_igroup_mock_object('igroup').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ ''' Test successful modify '''
+ data = self.mock_args()
+ data['initiators'] = 'new'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_igroup_mock_object('igroup').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup.NetAppOntapIgroup.get_igroup')
+ def test_successful_rename(self, get_vserver):
+ '''Test successful rename'''
+ data = self.mock_args()
+ data['from_name'] = 'test'
+ data['name'] = 'test_new'
+ set_module_args(data)
+ current = {
+ 'initiators': ['init1', 'init2']
+ }
+ get_vserver.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_igroup_mock_object().apply()
+ assert exc.value.args[0]['changed']
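
test_successful_rename above patches get_igroup() and feeds it a side_effect list: each successive call returns the next element, so the first lookup (the new name) misses and the second (from_name) hits. A self-contained sketch of that sequencing with hypothetical names:

from unittest.mock import patch


class Lookup:
    ''' hypothetical stand-in for the module's get method '''
    def get_igroup(self, name):
        raise NotImplementedError  # never reached while patched


with patch.object(Lookup, 'get_igroup') as mock_get:
    mock_get.side_effect = [None, {'initiators': ['init1', 'init2']}]
    lookup = Lookup()
    assert lookup.get_igroup('test_new') is None                           # new name: not found
    assert lookup.get_igroup('test')['initiators'] == ['init1', 'init2']   # old name: found
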
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py
new file mode 100644
index 00000000..d7e3bc68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py
@@ -0,0 +1,218 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_igroup_initiator '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup_initiator \
+ import NetAppOntapIgroupInitiator as initiator # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'initiator':
+ xml = self.build_igroup_initiator()
+ elif self.kind == 'initiator_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_igroup_initiator():
+ ''' build xml data for initiator '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'initiator-group-info': {
+ 'initiators': [
+ {'initiator-info': {
+ 'initiator-name': 'init1'
+ }},
+ {'initiator-info': {
+ 'initiator-name': 'init2'
+ }}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'name': 'init1',
+ 'initiator_group': 'test',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password'
+ }
+
+ def get_initiator_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_igroup_initiator object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_igroup_initiator object
+ """
+ obj = initiator()
+ obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ obj.server = MockONTAPConnection(kind=kind)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ initiator()
+
+ def test_get_nonexistent_initiator(self):
+ ''' Test that a non-existent initiator is not in the list returned by get_initiators '''
+ data = self.mock_args()
+ data['name'] = 'idontexist'
+ set_module_args(data)
+ result = self.get_initiator_mock_object('initiator').get_initiators()
+ assert data['name'] not in result
+
+ def test_get_nonexistent_igroup(self):
+ ''' Test that get_initiators returns an empty list for a non-existent igroup '''
+ data = self.mock_args()
+ data['name'] = 'idontexist'
+ set_module_args(data)
+ result = self.get_initiator_mock_object().get_initiators()
+ assert result == []
+
+ def test_get_existing_initiator(self):
+ ''' Test that get_initiators returns the existing initiators '''
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_initiator_mock_object(kind='initiator').get_initiators()
+ assert data['name'] in result
+ assert result == ['init1', 'init2'] # from build_igroup_initiator()
+
+ def test_successful_add(self):
+ ''' Test successful add'''
+ data = self.mock_args()
+ data['name'] = 'iamnew'
+ set_module_args(data)
+ obj = self.get_initiator_mock_object('initiator')
+ with pytest.raises(AnsibleExitJson) as exc:
+ current = obj.get_initiators()
+ obj.apply()
+ assert data['name'] not in current
+ assert exc.value.args[0]['changed']
+
+ def test_successful_add_idempotency(self):
+ ''' Test successful add idempotency '''
+ data = self.mock_args()
+ set_module_args(data)
+ obj = self.get_initiator_mock_object('initiator')
+ with pytest.raises(AnsibleExitJson) as exc:
+ current_list = obj.get_initiators()
+ obj.apply()
+ assert data['name'] in current_list
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_remove(self):
+ ''' Test successful remove '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ obj = self.get_initiator_mock_object('initiator')
+ with pytest.raises(AnsibleExitJson) as exc:
+ current_list = obj.get_initiators()
+ obj.apply()
+ assert data['name'] in current_list
+ assert exc.value.args[0]['changed']
+
+ def test_successful_remove_idempotency(self):
+ ''' Test successful remove idempotency'''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ data['name'] = 'alreadyremoved'
+ set_module_args(data)
+ obj = self.get_initiator_mock_object('initiator')
+ with pytest.raises(AnsibleExitJson) as exc:
+ current_list = obj.get_initiators()
+ obj.apply()
+ assert data['name'] not in current_list
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ data = self.mock_args()
+ set_module_args(data)
+ my_obj = self.get_initiator_mock_object('initiator_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_initiators()
+ assert 'Error fetching igroup info ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_initiator(data['name'], 'igroup-add')
+ assert 'Error modifying igroup initiator ' in exc.value.args[0]['msg']
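
The MockONTAPConnection classes in these files share one shape: invoke_successfully() looks at the request (or a 'kind' chosen at construction) and either returns canned data or raises, which is how test_if_all_methods_catch_exception above exercises the error paths. A netapp_lib-free sketch of that dispatch idea (class, tag, and payload names are illustrative only):

class FakeApiError(Exception):
    ''' stand-in for netapp_utils.zapi.NaApiError '''


class FakeConnection:
    def __init__(self, kind=None):
        self.kind = kind
        self.requests = []          # record every call for later assertions

    def invoke_successfully(self, request, enable_tunneling=False):
        self.requests.append(request)
        if self.kind == 'fail':
            raise FakeApiError('TEST', 'simulated ZAPI failure')
        if request == 'igroup-get-iter':
            return {'num-records': 1, 'initiators': ['init1', 'init2']}
        return {}


conn = FakeConnection()
assert conn.invoke_successfully('igroup-get-iter')['initiators'] == ['init1', 'init2']
try:
    FakeConnection(kind='fail').invoke_successfully('igroup-add')
except FakeApiError as err:
    assert err.args == ('TEST', 'simulated ZAPI failure')
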
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py
new file mode 100644
index 00000000..625fbb2b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py
@@ -0,0 +1,557 @@
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module na_ontap_info '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+import sys
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_info import main as info_main
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_info import __finditem as info_finditem
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_info \
+ import NetAppONTAPGatherInfo as info_module # module under test
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_info \
+ import convert_keys as info_convert_keys # function under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'vserver':
+ xml = self.build_vserver_info()
+ elif self.type == 'net_port':
+ xml = self.build_net_port_info()
+ elif self.type == 'net_port_no_ifgrp':
+ xml = self.build_net_port_info('no_ifgrp')
+ elif self.type == 'net_port_with_ifgrp':
+ xml = self.build_net_port_info('with_ifgrp')
+ # for the next calls
+ self.type = 'net_ifgrp'
+ elif self.type == 'net_ifgrp':
+ xml = self.build_net_ifgrp_info()
+ elif self.type == 'zapi_error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ elif self.type == 'list_of_one':
+ xml = self.list_of_one()
+ elif self.type == 'list_of_two':
+ xml = self.list_of_two()
+ elif self.type == 'list_of_two_dups':
+ xml = self.list_of_two_dups()
+ else:
+ raise KeyError(self.type)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_info():
+ ''' build xml data for vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes-list')
+ attributes.add_node_with_children('vserver-info',
+ **{'vserver-name': 'test_vserver'})
+ xml.add_child_elem(attributes)
+ return xml
+
+ @staticmethod
+ def build_net_port_info(with_type=None):
+ ''' build xml data for net-port-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes_list = netapp_utils.zapi.NaElement('attributes-list')
+ num_net_port_info = 2
+ for i in range(num_net_port_info):
+ net_port_info = netapp_utils.zapi.NaElement('net-port-info')
+ net_port_info.add_new_child('node', 'node_' + str(i))
+ net_port_info.add_new_child('port', 'port_' + str(i))
+ net_port_info.add_new_child('broadcast_domain', 'test_domain_' + str(i))
+ net_port_info.add_new_child('ipspace', 'ipspace' + str(i))
+ if with_type == 'with_ifgrp':
+ net_port_info.add_new_child('port_type', 'if_group')
+ elif with_type == 'no_ifgrp':
+ net_port_info.add_new_child('port_type', 'whatever')
+ attributes_list.add_child_elem(net_port_info)
+ xml.add_child_elem(attributes_list)
+ return xml
+
+ @staticmethod
+ def build_net_ifgrp_info():
+ ''' build xml data for net-ifgrp-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes_list = netapp_utils.zapi.NaElement('attributes')
+ num_net_ifgrp_info = 2
+ for i in range(num_net_ifgrp_info):
+ net_ifgrp_info = netapp_utils.zapi.NaElement('net-ifgrp-info')
+ net_ifgrp_info.add_new_child('ifgrp-name', 'ifgrp_' + str(i))
+ net_ifgrp_info.add_new_child('node', 'node_' + str(i))
+ attributes_list.add_child_elem(net_ifgrp_info)
+ xml.add_child_elem(attributes_list)
+ return xml
+
+ @staticmethod
+ def list_of_one():
+ ''' build xml data for list of one info element '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ list_of_one = [{'k1': 'v1', 'k2': 'v2'}]
+ xml.translate_struct(list_of_one)
+ return xml
+
+ @staticmethod
+ def list_of_two():
+ ''' build xml data for list of two info elements '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ list_of_two = [{'k1': 'v1'}, {'k2': 'v2'}]
+ xml.translate_struct(list_of_two)
+ return xml
+
+ @staticmethod
+ def list_of_two_dups():
+ ''' build xml data for list of two info elements with same key '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ list_of_two = [{'k1': 'v1'}, {'k1': 'v2'}]
+ xml.translate_struct(list_of_two)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def mock_args(self):
+ return {
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'vserver': None
+ }
+
+ def get_info_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_info object
+ """
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='info', choices=['info']),
+ gather_subset=dict(default=['all'], type='list'),
+ vserver=dict(type='str', default=None, required=False),
+ max_records=dict(type='int', default=1024, required=False),
+ desired_attributes=dict(type='dict', required=False),
+ use_native_zapi_tags=dict(type='bool', required=False, default=False),
+ continue_on_error=dict(type='list', required=False, default=['never']),
+ query=dict(type='dict', required=False),
+ ))
+ module = basic.AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ max_records = module.params['max_records']
+ obj = info_module(module, max_records)
+ obj.netapp_info = dict()
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ obj.server = MockONTAPConnection(kind)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ self.get_info_mock_object()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_ensure_command_called(self, mock_ems_log):
+ ''' calling get_all will raise a KeyError exception '''
+ set_module_args(self.mock_args())
+ my_obj = self.get_info_mock_object('vserver')
+ with pytest.raises(KeyError) as exc:
+ my_obj.get_all(['net_interface_info'])
+ if sys.version_info >= (2, 7):
+ msg = 'net-interface-info'
+ print(exc.value.args[0])
+ assert exc.value.args[0] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_get_generic_get_iter(self, mock_ems_log):
+ '''calling get_generic_get_iter will return expected dict'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('net_port')
+ result = obj.get_generic_get_iter(
+ 'net-port-get-iter',
+ attribute='net-port-info',
+ key_fields=('node', 'port'),
+ query={'max-records': '1024'}
+ )
+ assert result.get('node_0:port_0')
+ assert result.get('node_1:port_1')
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_info.NetAppONTAPGatherInfo.get_all')
+ def test_main(self, get_all):
+ '''test main method.'''
+ set_module_args(self.mock_args())
+ get_all.side_effect = [
+ {'test_get_all':
+ {'vserver_login_banner_info': 'test_vserver_login_banner_info', 'vserver_info': 'test_vserver_info'}}
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ info_main()
+ assert exc.value.args[0]['state'] == 'info'
+
+ def test_get_ifgrp_info_no_ifgrp(self):
+ '''test get_ifgrp_info with empty ifgrp_info'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('net_port_no_ifgrp')
+ result = obj.get_ifgrp_info()
+ assert result == {}
+
+ def test_get_ifgrp_info_with_ifgrp(self):
+        '''test get_ifgrp_info with ifgrp_info present'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('net_port_with_ifgrp')
+ result = obj.get_ifgrp_info()
+ assert result.get('node_0:ifgrp_0')
+ assert result.get('node_1:ifgrp_1')
+
+ def test_ontapi_error(self):
+ '''test ontapi will raise zapi error'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('zapi_error')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.ontapi()
+        # The new version of netapp-lib adds a space after ':'
+        # Keep both versions to keep the pipeline happy
+ assert exc.value.args[0]['msg'] == 'Error calling API system-get-ontapi-version: NetApp API failed. Reason - test:error'
+
+ def test_call_api_error(self):
+ '''test call_api will raise zapi error'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('zapi_error')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.call_api('nvme-get-iter')
+        # The new version of netapp-lib adds a space after ':'
+        # Keep both versions to keep the pipeline happy
+ assert exc.value.args[0]['msg'] == 'Error calling API nvme-get-iter: NetApp API failed. Reason - test:error'
+
+ def test_find_item(self):
+ '''test __find_item return expected key value'''
+ obj = {"A": 1, "B": {"C": {"D": 2}}}
+ key = "D"
+ result = info_finditem(obj, key)
+ assert result == 2
+
+ def test_subset_return_all_complete(self):
+ ''' Check all returns all of the entries if version is high enough '''
+ version = '170' # change this if new ZAPIs are supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset = obj.get_subset(['all'], version)
+ assert set(obj.info_subsets.keys()) == subset
+
+ def test_subset_return_all_partial(self):
+ ''' Check all returns a subset of the entries if version is low enough '''
+ version = '120' # low enough so that some ZAPIs are not supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset = obj.get_subset(['all'], version)
+ all_keys = obj.info_subsets.keys()
+ assert set(all_keys) > subset
+ supported_keys = filter(lambda key: obj.info_subsets[key]['min_version'] <= version, all_keys)
+ assert set(supported_keys) == subset
+
+ def test_subset_return_one(self):
+ ''' Check single entry returns one '''
+ version = '120' # low enough so that some ZAPIs are not supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset = obj.get_subset(['net_interface_info'], version)
+ assert len(subset) == 1
+
+ def test_subset_return_multiple(self):
+ ''' Check that more than one entry returns the same number '''
+ version = '120' # low enough so that some ZAPIs are not supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset_entries = ['net_interface_info', 'net_port_info']
+ subset = obj.get_subset(subset_entries, version)
+ assert len(subset) == len(subset_entries)
+
+ def test_subset_return_bad(self):
+ ''' Check that a bad subset entry will error out '''
+ version = '120' # low enough so that some ZAPIs are not supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.get_subset(['net_interface_info', 'my_invalid_subset'], version)
+ print('Info: %s' % exc.value.args[0]['msg'])
+ assert exc.value.args[0]['msg'] == 'Bad subset: my_invalid_subset'
+
+ def test_subset_return_unsupported(self):
+ ''' Check that a new subset entry will error out on an older system '''
+ version = '120' # low enough so that some ZAPIs are not supported
+ key = 'nvme_info' # only supported starting at 140
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.get_subset(['net_interface_info', key], version)
+ print('Info: %s' % exc.value.args[0]['msg'])
+ msg = 'Remote system at version %s does not support %s' % (version, key)
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_subset_return_none(self):
+ ''' Check usable subset can be empty '''
+        version = '!' # lower than '0', so that no ZAPI is supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset = obj.get_subset(['all'], version)
+ assert len(subset) == 0
+
+ def test_subset_return_all_expect_one(self):
+ ''' Check !x returns all of the entries except x if version is high enough '''
+ version = '170' # change this if new ZAPIs are supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset = obj.get_subset(['!net_interface_info'], version)
+ assert len(obj.info_subsets.keys()) == len(subset) + 1
+ subset.add('net_interface_info')
+ assert set(obj.info_subsets.keys()) == subset
+
+ def test_subset_return_all_expect_three(self):
+ ''' Check !x,!y,!z returns all of the entries except x, y, z if version is high enough '''
+ version = '170' # change this if new ZAPIs are supported
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ subset = obj.get_subset(['!net_interface_info', '!nvme_info', '!ontap_version'], version)
+ assert len(obj.info_subsets.keys()) == len(subset) + 3
+ subset.update(['net_interface_info', 'nvme_info', 'ontap_version'])
+ assert set(obj.info_subsets.keys()) == subset
+
+ def test_subset_return_none_with_exclusion(self):
+        ''' Check that excluding an unsupported subset with !x errors out '''
+        version = '!' # lower than '0', so that no ZAPI is supported
+ key = 'net_interface_info'
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.get_subset(['!' + key], version)
+ print('Info: %s' % exc.value.args[0]['msg'])
+ msg = 'Remote system at version %s does not support %s' % (version, key)
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_get_generic_get_iter_flatten_list_of_one(self, mock_ems_log):
+ '''calling get_generic_get_iter will return expected dict'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('list_of_one')
+ result = obj.get_generic_get_iter(
+ 'list_of_one',
+ attributes_list_tag=None,
+ )
+ assert isinstance(result, dict)
+ assert result.get('k1') == 'v1'
+ assert result.get('k2') == 'v2'
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_get_generic_get_iter_flatten_list_of_two(self, mock_ems_log):
+ '''calling get_generic_get_iter will return expected dict'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('list_of_two')
+ result = obj.get_generic_get_iter(
+ 'list_of_two',
+ attributes_list_tag=None,
+ )
+ assert isinstance(result, dict)
+ assert result.get('k1') == 'v1'
+ assert result.get('k2') == 'v2'
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_get_generic_get_iter_flatten_list_of_two_dups(self, mock_ems_log):
+        '''calling get_generic_get_iter will return expected list'''
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('list_of_two_dups')
+ result = obj.get_generic_get_iter(
+ 'list_of_two_dups',
+ attributes_list_tag=None,
+ )
+ assert isinstance(result, list)
+ assert result[0].get('k1') == 'v1'
+ assert result[1].get('k1') == 'v2'
+
+ def test_check_underscore(self):
+ ''' Check warning is recorded if '_' is found in key '''
+ test_dict = dict(
+ bad_key='something'
+ )
+ test_dict['good-key'] = [dict(
+ other_bad_key=dict(
+ yet_another_bad_key=1
+ ),
+ somekey=dict(
+ more_bad_key=2
+ )
+ )]
+ set_module_args(self.mock_args())
+ obj = self.get_info_mock_object('vserver')
+ obj.check_for___in_keys(test_dict)
+ print('Info: %s' % repr(obj.warnings))
+ for key in ['bad_key', 'other_bad_key', 'yet_another_bad_key', 'more_bad_key']:
+ msg = "Underscore in ZAPI tag: %s, do you mean '-'?" % key
+ assert msg in obj.warnings
+ obj.warnings.remove(msg)
+        # make sure there are no extra warnings (i.e. we found and removed all of them)
+ assert obj.warnings == list()
+
+ @staticmethod
+ def d2us(astr):
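+        # dash-to-underscore helper, mirroring the key conversion performed by info_convert_keys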
+ return str.replace(astr, '-', '_')
+
+ def test_convert_keys_string(self):
+ ''' no conversion '''
+ key = 'a-b-c'
+ assert info_convert_keys(key) == key
+
+ def test_convert_keys_tuple(self):
+ ''' no conversion '''
+ key = 'a-b-c'
+ anobject = (key, key)
+ assert info_convert_keys(anobject) == anobject
+
+ def test_convert_keys_list(self):
+ ''' no conversion '''
+ key = 'a-b-c'
+ anobject = [key, key]
+ assert info_convert_keys(anobject) == anobject
+
+ def test_convert_keys_simple_dict(self):
+ ''' conversion of keys '''
+ key = 'a-b-c'
+ anobject = {key: 1}
+ assert list(info_convert_keys(anobject).keys())[0] == self.d2us(key)
+
+ def test_convert_keys_list_of_dict(self):
+ ''' conversion of keys '''
+ key = 'a-b-c'
+ anobject = [{key: 1}, {key: 2}]
+ converted = info_convert_keys(anobject)
+ for adict in converted:
+ for akey in adict:
+ assert akey == self.d2us(key)
+
+ def test_set_error_flags_error_n(self):
+        ''' Check set_error_flags errors out when 'never' is combined with another keyword '''
+ args = dict(self.mock_args())
+ args['continue_on_error'] = ['never', 'whatever']
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = self.get_info_mock_object('vserver')
+ print('Info: %s' % exc.value.args[0]['msg'])
+ msg = "never needs to be the only keyword in 'continue_on_error' option."
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_set_error_flags_error_a(self):
+        ''' Check set_error_flags errors out when 'always' is combined with another keyword '''
+ args = dict(self.mock_args())
+ args['continue_on_error'] = ['whatever', 'always']
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = self.get_info_mock_object('vserver')
+ print('Info: %s' % exc.value.args[0]['msg'])
+ msg = "always needs to be the only keyword in 'continue_on_error' option."
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_set_error_flags_error_u(self):
+        ''' Check set_error_flags errors out on an invalid keyword '''
+ args = dict(self.mock_args())
+ args['continue_on_error'] = ['whatever', 'else']
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = self.get_info_mock_object('vserver')
+ print('Info: %s' % exc.value.args[0]['msg'])
+ msg = "whatever is not a valid keyword in 'continue_on_error' option."
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_set_error_flags_1_flag(self):
+        ''' Check set_error_flags sets the correct flags '''
+ args = dict(self.mock_args())
+ args['continue_on_error'] = ['missing_vserver_api_error']
+ set_module_args(args)
+ obj = self.get_info_mock_object('vserver')
+ assert not obj.error_flags['missing_vserver_api_error']
+ assert obj.error_flags['rpc_error']
+ assert obj.error_flags['other_error']
+
+ def test_set_error_flags_2_flags(self):
+        ''' Check set_error_flags sets the correct flags '''
+ args = dict(self.mock_args())
+ args['continue_on_error'] = ['missing_vserver_api_error', 'rpc_error']
+ set_module_args(args)
+ obj = self.get_info_mock_object('vserver')
+ assert not obj.error_flags['missing_vserver_api_error']
+ assert not obj.error_flags['rpc_error']
+ assert obj.error_flags['other_error']
+
+ def test_set_error_flags_3_flags(self):
+        ''' Check set_error_flags sets the correct flags '''
+ args = dict(self.mock_args())
+ args['continue_on_error'] = ['missing_vserver_api_error', 'rpc_error', 'other_error']
+ set_module_args(args)
+ obj = self.get_info_mock_object('vserver')
+ assert not obj.error_flags['missing_vserver_api_error']
+ assert not obj.error_flags['rpc_error']
+ assert not obj.error_flags['other_error']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py
new file mode 100644
index 00000000..67c04c2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py
@@ -0,0 +1,312 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_interface '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_interface \
+ import NetAppOntapInterface as interface_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
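+        # 'interface' returns canned LIF attributes, 'zapi_error' simulates a failing ZAPI call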
+ self.xml_in = xml
+ if self.type == 'interface':
+ xml = self.build_interface_info(self.params)
+ elif self.type == 'zapi_error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_interface_info(data):
+        ''' build xml data for net-interface-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'net-interface-info': {
+ 'interface-name': data['name'],
+ 'administrative-status': data['administrative-status'],
+ 'failover-policy': data['failover-policy'],
+ 'firewall-policy': data['firewall-policy'],
+ 'is-auto-revert': data['is-auto-revert'],
+ 'home-node': data['home_node'],
+ 'home-port': data['home_port'],
+ 'address': data['address'],
+ 'netmask': data['netmask'],
+ 'role': data['role'],
+ 'protocols': data['protocols'] if data.get('protocols') else None,
+ 'dns-domain-name': data['dns_domain_name'],
+                    'listen-for-dns-query': data['listen_for_dns_query'],
+ 'is-dns-update-enabled': data['is_dns_update_enabled']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_interface = {
+ 'name': 'test_lif',
+ 'administrative-status': 'up',
+ 'failover-policy': 'up',
+ 'firewall-policy': 'up',
+ 'is-auto-revert': 'true',
+ 'home_node': 'node',
+ 'role': 'data',
+ 'home_port': 'e0c',
+ 'address': '2.2.2.2',
+ 'netmask': '1.1.1.1',
+ 'dns_domain_name': 'test.com',
+ 'listen_for_dns_query': True,
+ 'is_dns_update_enabled': True,
+ 'admin_status': 'up'
+ }
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'interface_name': self.mock_interface['name'],
+ 'home_node': self.mock_interface['home_node'],
+ 'role': self.mock_interface['role'],
+ 'home_port': self.mock_interface['home_port'],
+ 'address': self.mock_interface['address'],
+ 'netmask': self.mock_interface['netmask'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def get_interface_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_interface object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_interface object
+ """
+ interface_obj = interface_module()
+ interface_obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ interface_obj.server = MockONTAPConnection()
+ else:
+ interface_obj.server = MockONTAPConnection(kind=kind, data=self.mock_interface)
+ return interface_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ interface_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if required param 'role' is not specified'''
+ data = self.mock_args()
+ del data['role']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_interface_mock_object('interface').create_interface()
+ msg = 'Error: Missing one or more required parameters for creating interface: ' \
+ 'home_port, netmask, role, home_node, address'
+        expected = sorted(msg.split(','))
+        received = sorted(exc.value.args[0]['msg'].split(','))
+ assert expected == received
+
+ def test_get_nonexistent_interface(self):
+ ''' Test if get_interface returns None for non-existent interface '''
+ set_module_args(self.mock_args())
+ result = self.get_interface_mock_object().get_interface()
+ assert result is None
+
+ def test_get_existing_interface(self):
+        ''' Test if get_interface returns interface details for existing interface '''
+ set_module_args(self.mock_args())
+ result = self.get_interface_mock_object(kind='interface').get_interface()
+ assert result['interface_name'] == self.mock_interface['name']
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_create_for_NVMe(self):
+ ''' Test successful create for NVMe protocol'''
+ data = self.mock_args()
+ data['protocols'] = 'fc-nvme'
+ del data['address']
+ del data['netmask']
+ del data['home_port']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency_for_NVMe(self):
+ ''' Test create idempotency for NVMe protocol '''
+ data = self.mock_args()
+ data['protocols'] = 'fc-nvme'
+ del data['address']
+ del data['netmask']
+ del data['home_port']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object('interface').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_create_error_for_NVMe(self):
+        ''' Test if create throws an error when parameters not supported with fc-nvme are present '''
+ data = self.mock_args()
+ data['protocols'] = 'fc-nvme'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_interface_mock_object('interface').create_interface()
+ msg = 'Error: Following parameters for creating interface are not supported for data-protocol fc-nvme: ' \
+ 'netmask, firewall_policy, address'
+        expected = sorted(msg.split(','))
+        received = sorted(exc.value.args[0]['msg'].split(','))
+ assert expected == received
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object('interface').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete(self):
+ ''' Test delete existing interface '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object('interface').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+        ''' Test successful modify of interface properties '''
+ data = self.mock_args()
+ data['home_port'] = 'new_port'
+ data['dns_domain_name'] = 'test2.com'
+ data['listen_for_dns_query'] = False
+ data['is_dns_update_enabled'] = False
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ interface_obj = self.get_interface_mock_object('interface')
+ interface_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_idempotency(self):
+ ''' Test modify idempotency '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_interface_mock_object('interface').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_interface.NetAppOntapInterface.get_interface')
+ def test_error_message(self, get_interface):
+        ''' Test error messages for create, modify and delete failures '''
+ data = self.mock_args()
+ set_module_args(data)
+ get_interface.side_effect = [None]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_interface_mock_object('zapi_error').apply()
+ assert exc.value.args[0]['msg'] == 'Error Creating interface test_lif: NetApp API failed. Reason - test:error'
+
+ data = self.mock_args()
+ data['home_port'] = 'new_port'
+ data['dns_domain_name'] = 'test2.com'
+ data['listen_for_dns_query'] = False
+ data['is_dns_update_enabled'] = False
+ set_module_args(data)
+ get_interface.side_effect = [
+ self.mock_interface
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_interface_mock_object('zapi_error').apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying interface test_lif: NetApp API failed. Reason - test:error'
+
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ current = self.mock_interface
+ current['admin_status'] = 'down'
+ get_interface.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_interface_mock_object('zapi_error').apply()
+ assert exc.value.args[0]['msg'] == 'Error deleting interface test_lif: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py
new file mode 100644
index 00000000..641e1414
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py
@@ -0,0 +1,269 @@
+# (c) 2018, NTT Europe Ltd.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit test for Ansible module: na_ontap_ipspace """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ipspace \
+ import NetAppOntapIpspace as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
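+# each SRR entry is a (status_code, response_body, error_message) tuple returned by the mocked send_request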
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'ipspace_record': (200, {'records': [{"name": "test_ipspace",
+ "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"}]}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'ipspace':
+ xml = self.build_ipspace_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_ipspace_info(ipspace):
+ ''' build xml data for ipspace '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'net-ipspaces-info': {'ipspace': ipspace}}}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test_ipspace',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password'
+ })
+
+ @staticmethod
+ def get_ipspace_mock_object(cx_type='zapi', kind=None, status=None):
+ ipspace_obj = my_module()
+ if cx_type == 'zapi':
+ if kind is None:
+ ipspace_obj.server = MockONTAPConnection()
+ else:
+ ipspace_obj.server = MockONTAPConnection(kind=kind, parm1=status)
+ return ipspace_obj
+
+ def test_fail_requiredargs_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_get_ipspace_iscalled(self, mock_request):
+ ''' test if get_ipspace() is called '''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ ipspace = my_obj.get_ipspace()
+ print('Info: test_get_ipspace: %s' % repr(ipspace))
+ assert ipspace is None
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_ipspace_apply_iscalled(self, mock_request):
+ ''' test if apply() is called '''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ module_args = {'name': 'test_apply_ips'}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.server = self.server
+ ipspace = my_obj.get_ipspace()
+ print('Info: test_get_ipspace: %s' % repr(ipspace))
+ assert ipspace is None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ipspace_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ my_obj.server = MockONTAPConnection('ipspace', 'test_apply_ips')
+ ipspace = my_obj.get_ipspace()
+ print('Info: test_get_ipspace: %s' % repr(ipspace))
+ assert ipspace is not None
+ assert ipspace['name'] == 'test_apply_ips'
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ipspace_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ ipspace = my_obj.get_ipspace()
+ assert ipspace['name'] == 'test_apply_ips'
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.set_default_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_create_rest(self, mock_request):
+ data = self.set_default_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_idempotent_create_rest(self, mock_request):
+ data = self.set_default_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['ipspace_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_delete_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['ipspace_record'], # get
+ SRR['empty_good'], # delete
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_idempotent_delete_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_modify_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_idempotent_modify_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['ipspace_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ipspace_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py
new file mode 100644
index 00000000..bb31fd65
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py
@@ -0,0 +1,256 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_iscsi_security '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_iscsi_security \
+ import NetAppONTAPIscsiSecurity as iscsi_module # module under test
+
+# REST API canned responses when mocking send_request
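+# entries are (status_code, response_body, error_message) tuples returned by the mocked send_request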
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_uuid': (
+ 200,
+ {
+ "records": [
+ {
+ "uuid": "e2e89ccc-db35-11e9-0000-000000000000"
+ }
+ ],
+ "num_records": 1
+ }, None),
+ 'get_initiator': (
+ 200,
+ {
+ "records": [
+ {
+ "svm": {
+ "uuid": "e2e89ccc-db35-11e9-0000-000000000000",
+ "name": "test_ansible"
+ },
+ "initiator": "eui.0123456789abcdef",
+ "authentication_type": "chap",
+ "chap": {
+ "inbound": {
+ "user": "test_user_1"
+ },
+ "outbound": {
+ "user": "test_user_2"
+ }
+ },
+ "initiator_address": {
+ "ranges": [
+ {
+ "start": "10.125.10.0",
+ "end": "10.125.10.10",
+ "family": "ipv4"
+ },
+ {
+ "start": "10.10.10.7",
+ "end": "10.10.10.7",
+ "family": "ipv4"
+ }
+ ]
+ }
+ }
+ ],
+ "num_records": 1
+ }, None),
+ "no_record": (
+ 200,
+ {
+ "num_records": 0
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_iscsi_security '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_iscsi = {
+ "initiator": "eui.0123456789abcdef",
+ "inbound_username": "test_user_1",
+ "inbound_password": "123",
+ "outbound_username": "test_user_2",
+ "outbound_password": "321",
+ "auth_type": "chap",
+ "address_ranges": ["10.125.10.0-10.125.10.10", "10.10.10.7"]
+ }
+
+ def mock_args(self):
+ return {
+ 'initiator': self.mock_iscsi['initiator'],
+ 'inbound_username': self.mock_iscsi['inbound_username'],
+ 'inbound_password': self.mock_iscsi['inbound_password'],
+ 'outbound_username': self.mock_iscsi['outbound_username'],
+ 'outbound_password': self.mock_iscsi['outbound_password'],
+ 'auth_type': self.mock_iscsi['auth_type'],
+ 'address_ranges': self.mock_iscsi['address_ranges'],
+ 'hostname': 'test',
+ 'vserver': 'test_vserver',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_iscsi_mock_object(self):
+ """
+ Helper method to return an na_ontap_iscsi_security object
+ :return: na_ontap_iscsi_security object
+ """
+ iscsi_obj = iscsi_module()
+ return iscsi_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ '''Test successful rest create'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ '''Test rest create idempotency'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['get_initiator'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_modify_address(self, mock_request):
+ '''Test successful rest modify'''
+ data = self.mock_args()
+ data['address_ranges'] = ['10.10.10.8']
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['get_initiator'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_modify_user(self, mock_request):
+ '''Test successful rest modify'''
+ data = self.mock_args()
+ data['inbound_username'] = 'test_user_3'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['get_initiator'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ '''Test rest error'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['no_record'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert 'Error on creating initiator: Expected error' in exc.value.args[0]['msg']
+
+ data = self.mock_args()
+ data['inbound_username'] = 'test_user_3'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['get_initiator'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert 'Error on modifying initiator: Expected error' in exc.value.args[0]['msg']
+
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['get_uuid'],
+ SRR['get_initiator'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_iscsi_mock_object().apply()
+ assert 'Error on deleting initiator: Expected error' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py
new file mode 100644
index 00000000..c7196677
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py
@@ -0,0 +1,369 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_job_schedule '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_job_schedule \
+ import NetAppONTAPJob as job_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
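+# entries are (status_code, response_body, error_message) tuples returned by the mocked send_request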
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_schedule': (
+ 200,
+ {
+ "records": [
+ {
+ "uuid": "010df156-e0a9-11e9-9f70-005056b3df08",
+ "name": "test_job",
+ "cron": {
+ "minutes": [
+ 25
+ ],
+ "hours": [
+ 0
+ ],
+ "weekdays": [
+ 0
+ ]
+ }
+ }
+ ],
+ "num_records": 1
+ }, None),
+ "no_record": (
+ 200,
+ {"num_records": 0},
+ None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'job':
+ xml = self.build_job_schedule_cron_info(self.params)
+ elif self.kind == 'job_multiple':
+ xml = self.build_job_schedule_multiple_cron_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ def autosupport_log(self):
+ ''' Mock autosupport log method, returns None '''
+ return None
+
+ @staticmethod
+ def build_job_schedule_cron_info(job_details):
+        ''' build xml data for job-schedule-cron-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'job-schedule-cron-info': {
+ 'job-schedule-name': job_details['name'],
+ 'job-schedule-cron-minute': {
+ 'cron-minute': job_details['minutes']
+ }
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_job_schedule_multiple_cron_info(job_details):
+        ''' build xml data for job-schedule-cron-info with multiple minutes and months '''
+ print("CALLED MULTIPLE BUILD")
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'job-schedule-cron-info': {
+ 'job-schedule-name': job_details['name'],
+ 'job-schedule-cron-minute': [
+ {'cron-minute': '25'},
+ {'cron-minute': '35'}
+ ],
+ 'job-schedule-cron-month': [
+ {'cron-month': '5'},
+ {'cron-month': '10'}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_job_schedule '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_job = {
+ 'name': 'test_job',
+ 'minutes': '25',
+ 'job_hours': ['0'],
+ 'weekdays': ['0']
+ }
+
+ def mock_args(self, rest=False):
+ if rest:
+ return {
+ 'name': self.mock_job['name'],
+ 'job_minutes': [self.mock_job['minutes']],
+ 'job_hours': self.mock_job['job_hours'],
+ 'job_days_of_week': self.mock_job['weekdays'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ else:
+ return {
+ 'name': self.mock_job['name'],
+ 'job_minutes': [self.mock_job['minutes']],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'use_rest': 'never'
+ }
+
+ def get_job_mock_object(self, kind=None, call_type='zapi'):
+ """
+ Helper method to return an na_ontap_job_schedule object
+ :param kind: passes this param to MockONTAPConnection()
+ :param call_type:
+ :return: na_ontap_job_schedule object
+ """
+ job_obj = job_module()
+ job_obj.autosupport_log = Mock(return_value=None)
+ if call_type == 'zapi':
+ if kind is None:
+ job_obj.server = MockONTAPConnection()
+ else:
+ job_obj.server = MockONTAPConnection(kind=kind, data=self.mock_job)
+ return job_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ job_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_job(self):
+ ''' Test if get_job_schedule returns None for non-existent job '''
+ set_module_args(self.mock_args())
+ result = self.get_job_mock_object().get_job_schedule()
+ assert result is None
+
+ def test_get_existing_job(self):
+        ''' Test if get_job_schedule returns job details for existing job '''
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_job_mock_object('job').get_job_schedule()
+ assert result['name'] == self.mock_job['name']
+ assert result['job_minutes'] == data['job_minutes']
+
+ def test_get_existing_job_multiple_minutes(self):
+        ''' Test if get_job_schedule returns job details for a job with multiple cron minutes and months '''
+ set_module_args(self.mock_args())
+ result = self.get_job_mock_object('job_multiple').get_job_schedule()
+ print(str(result))
+ assert result['name'] == self.mock_job['name']
+ assert result['job_minutes'] == ['25', '35']
+ assert result['job_months'] == ['5', '10']
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if job_minutes is not specified'''
+ data = self.mock_args()
+ del data['job_minutes']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_job_mock_object('job').create_job_schedule()
+ msg = 'Error: missing required parameter job_minutes for create'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object('job').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete(self):
+ ''' Test delete existing job '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object('job').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ ''' Test successful modify job_minutes '''
+ data = self.mock_args()
+ data['job_minutes'] = ['20']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object('job').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_idempotency(self):
+ ''' Test modify idempotency '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object('job').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ '''Test successful rest create'''
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object(call_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ '''Test rest create idempotency'''
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_schedule'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_job_mock_object(call_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+        '''Test rest error handling'''
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['no_record'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_job_mock_object(call_type='rest').apply()
+ assert 'Error on creating job schedule: Expected error' in exc.value.args[0]['msg']
+
+ data = self.mock_args(rest=True)
+ data['job_minutes'] = ['20']
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_schedule'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_job_mock_object(call_type='rest').apply()
+ assert 'Error on modifying job schedule: Expected error' in exc.value.args[0]['msg']
+
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_schedule'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_job_mock_object(call_type='rest').apply()
+ assert 'Error on deleting job schedule: Expected error' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py
new file mode 100644
index 00000000..5cdcb75a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py
@@ -0,0 +1,269 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test for ONTAP Kerberos Realm module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_kerberos_realm \
+ import NetAppOntapKerberosRealm as my_module # module under test
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import Mock
+
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+    """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+    """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+
+ if self.type == 'present_realm':
+ xml = self.build_realm()
+ else:
+ xml = self.build_empty_realm()
+
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_realm():
+ ''' build xml data for kerberos realm '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': "1",
+ 'attributes-list': {
+ 'kerberos-realm': {
+ 'admin-server-ip': "192.168.0.1",
+ 'admin-server-port': "749",
+ 'clock-skew': "5",
+ 'kdc-ip': "192.168.0.1",
+ 'kdc-port': "88",
+ 'kdc-vendor': "other",
+ 'password-server-ip': "192.168.0.1",
+ 'password-server-port': "464",
+ "permitted-enc-types": {
+ "string": ["des", "des3", "aes_128", "aes_256"]
+ },
+ 'realm': "EXAMPLE.COM",
+ 'vserver-name': "vserver0"
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_empty_realm():
+ ''' build xml data for kerberos realm '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': "0",
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection(kind='present_realm')
+
+ @staticmethod
+ def get_kerberos_realm_mock_object(kind=None):
+ """
+        Helper method to return an na_ontap_kerberos_realm object
+        :param kind: passes this param to MockONTAPConnection()
+        :return: na_ontap_kerberos_realm object
+ """
+ krbrealm_obj = my_module()
+ krbrealm_obj.asup_log_for_cserver = Mock(return_value=None)
+ krbrealm_obj.cluster = Mock()
+ krbrealm_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ krbrealm_obj.server = MockONTAPConnection()
+ else:
+ krbrealm_obj.server = MockONTAPConnection(kind=kind)
+ return krbrealm_obj
+
+ @staticmethod
+ def mock_args():
+ '''Set default arguments'''
+ return dict({
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': True,
+ 'validate_certs': False
+ })
+
+ @staticmethod
+ def test_module_fail_when_required_args_missing():
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_module_fail_when_state_present_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ data = self.mock_args()
+ data['state'] = 'present'
+ data['vserver'] = 'vserver1'
+ data['realm'] = 'NETAPP.COM'
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(data)
+ my_module()
+ msg = "state is present but all of the following are missing: kdc_vendor, kdc_ip"
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_get_nonexistent_realm(self):
+ ''' Test if get_krbrealm returns None for non-existent kerberos realm '''
+ data = self.mock_args()
+ data['vserver'] = 'none'
+ data['realm'] = 'none'
+ data['state'] = 'present'
+ data['kdc_ip'] = 'none'
+ data['kdc_vendor'] = 'other'
+ set_module_args(data)
+ result = self.get_kerberos_realm_mock_object().get_krbrealm()
+ assert result is None
+
+ def test_get_existing_realm(self):
+ ''' Test if get_krbrealm returns details for existing kerberos realm '''
+ data = self.mock_args()
+ data['vserver'] = 'vserver0'
+ data['realm'] = 'EXAMPLE.COM'
+ data['state'] = 'present'
+ data['kdc_ip'] = '10.0.0.1'
+ data['kdc_vendor'] = 'other'
+ set_module_args(data)
+ result = self.get_kerberos_realm_mock_object('present_realm').get_krbrealm()
+ assert result['realm']
+
+ def test_successfully_modify_realm(self):
+ ''' Test modify realm successful for modifying kdc_ip. '''
+ data = self.mock_args()
+ data['vserver'] = 'vserver0'
+ data['realm'] = 'EXAMPLE.COM'
+ data['state'] = 'present'
+ data['kdc_ip'] = '10.0.0.1'
+ data['kdc_vendor'] = 'other'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_kerberos_realm_mock_object('present_realm').apply()
+ assert exc.value.args[0]['changed']
+
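+ # Patching delete_krbrealm lets the test assert that apply() chose the delete
+ # path without actually building the ZAPI deletion request.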
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_kerberos_realm.NetAppOntapKerberosRealm.delete_krbrealm')
+ def test_successfully_delete_realm(self, delete_krbrealm):
+ ''' Test successfully delete realm '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ data['vserver'] = 'vserver0'
+ data['realm'] = 'EXAMPLE.COM'
+ set_module_args(data)
+ obj = self.get_kerberos_realm_mock_object('present_realm')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_krbrealm.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_kerberos_realm.NetAppOntapKerberosRealm.create_krbrealm')
+ def test_successfully_create_realm(self, create_krbrealm):
+ ''' Test successfully create realm '''
+ data = self.mock_args()
+ data['state'] = 'present'
+ data['vserver'] = 'vserver1'
+ data['realm'] = 'NETAPP.COM'
+ data['kdc_ip'] = '10.0.0.1'
+ data['kdc_vendor'] = 'other'
+ set_module_args(data)
+ obj = self.get_kerberos_realm_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+ create_krbrealm.assert_called_with()
+
+ def test_required_if(self):
+ ''' required arguments are reported as errors '''
+ data = self.mock_args()
+ data['state'] = 'present'
+ data['vserver'] = 'vserver1'
+ data['realm'] = 'NETAPP.COM'
+ data['kdc_ip'] = '10.0.0.1'
+ data['kdc_vendor'] = 'microsoft'
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(data)
+ my_module()
+ msg = "kdc_vendor is microsoft but all of the following are missing: ad_server_ip, ad_server_name"
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_required_if_single(self):
+ ''' required arguments are reported as errors '''
+ data = self.mock_args()
+ data['state'] = 'present'
+ data['vserver'] = 'vserver1'
+ data['realm'] = 'NETAPP.COM'
+ data['kdc_ip'] = '10.0.0.1'
+ data['kdc_vendor'] = 'microsoft'
+ data['ad_server_ip'] = '10.0.0.1'
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args(data)
+ my_module()
+ msg = "kdc_vendor is microsoft but all of the following are missing: ad_server_name"
+ assert exc.value.args[0]['msg'] == msg
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py
new file mode 100644
index 00000000..d8502342
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py
@@ -0,0 +1,185 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_ldap_client '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ldap_client \
+ import NetAppOntapLDAPClient as client_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'client':
+ xml = self.build_ldap_client_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_ldap_client_info(client_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'ldap-client': {
+ 'ldap-client-config': client_details['name'],
+ 'schema': client_details['schema'],
+ 'ldap-servers': [
+ {"ldap-server": client_details['ldap_servers'][0]},
+ {"ldap-server": client_details['ldap_servers'][1]}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_ldap_client '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_client = {
+ 'state': 'present',
+ 'name': 'test_ldap',
+ 'ldap_servers': ['ldap1.example.company.com', 'ldap2.example.company.com'],
+ 'schema': 'RFC-2307',
+ 'vserver': 'test_vserver',
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_client['state'],
+ 'name': self.mock_client['name'],
+ 'ldap_servers': self.mock_client['ldap_servers'],
+ 'schema': self.mock_client['schema'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'vserver': 'test_vserver',
+ }
+
+ def get_client_mock_object(self, kind=None):
+ client_obj = client_module()
+ client_obj.asup_log_for_cserver = Mock(return_value=None)
+ if kind is None:
+ client_obj.server = MockONTAPConnection()
+ else:
+ client_obj.server = MockONTAPConnection(kind='client', data=self.mock_client)
+ return client_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ client_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_client(self):
+ ''' Test if get ldap client returns None for a non-existent client '''
+ set_module_args(self.mock_args())
+ result = self.get_client_mock_object().get_ldap_client()
+ assert not result
+
+ def test_get_existing_client(self):
+ ''' Test if get ldap client returns details for an existing client '''
+ set_module_args(self.mock_args())
+ result = self.get_client_mock_object('client').get_ldap_client()
+ assert result
+
+ def test_successfully_create(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_client_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_client_mock_object('client').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_client_mock_object('client').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_client_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_modify(self):
+ data = self.mock_args()
+ data['ldap_servers'] = ["ldap1.example.company.com"]
+ set_module_args(data)
+ print(self.get_client_mock_object('client').get_ldap_client())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_client_mock_object('client').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py
new file mode 100644
index 00000000..f7b81ae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py
@@ -0,0 +1,287 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_login_messages'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_login_messages \
+ import NetAppOntapLoginMessages as messages_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required"
+
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # 'dns_record': ({"records": [{"message": "test message",
+ # "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}]}, None),
+ 'svm_uuid': (200, {"records": [{"uuid": "test_uuid"}], "num_records": 1}, None)
+}
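+# Each SRR entry is a (status_code, body, error) triple; the mocked send_request
+# returns these verbatim, standing in for the REST responses the module would
+# otherwise receive.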
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ request = xml.to_string().decode('utf-8')
+ print(request)
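+ # Dispatch on the serialized ZAPI request name and return a canned reply for
+ # each call the module is expected to issue.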
+ if self.kind == 'error':
+ raise netapp_utils.zapi.NaApiError('test', 'expect error')
+ elif request.startswith("<ems-autosupport-log>"):
+ xml = None # or something that makes the logger happy, so you don't need @patch anymore
+ # or
+ # xml = build_ems_log_response()
+ elif request.startswith("<vserver-login-banner-get-iter>"):
+ if self.kind == 'create':
+ xml = self.build_banner_info()
+ # elif self.kind == 'create_idempotency':
+ # xml = self.build_banner_info(self.params)
+ else:
+ xml = self.build_banner_info(self.params)
+ elif request.startswith("<vserver-login-banner-modify-iter>"):
+ xml = self.build_banner_info(self.params)
+ elif request.startswith("<vserver-motd-modify-iter>"):
+ xml = self.build_motd_info(self.params)
+ elif request.startswith("<vserver-motd-get-iter>"):
+ if self.kind == 'create':
+ xml = self.build_motd_info()
+ # elif self.kind == 'create_idempotency':
+ # xml = self.build_banner_info(self.params)
+ else:
+ xml = self.build_motd_info(self.params)
+
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_banner_info(data=None):
+ xml = netapp_utils.zapi.NaElement('xml')
+ vserver = 'vserver'
+ attributes = {'num-records': 1,
+ 'attributes-list': {'vserver-login-banner-info': {'vserver': vserver}}}
+ if data is not None and data.get('banner'):
+ attributes['attributes-list']['vserver-login-banner-info']['message'] = data['banner']
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_motd_info(data=None):
+ xml = netapp_utils.zapi.NaElement('xml')
+ vserver = 'vserver'
+ attributes = {'num-records': 1,
+ 'attributes-list': {'vserver-motd-info': {'vserver': vserver}}}
+ if data is not None and data.get('motd_message'):
+ attributes['attributes-list']['vserver-motd-info']['message'] = data['motd_message']
+ if data is not None and data.get('show_cluster_motd') is False:
+ attributes['attributes-list']['vserver-motd-info']['is-cluster-message-enabled'] = 'false'
+ else:
+ attributes['attributes-list']['vserver-motd-info']['is-cluster-message-enabled'] = 'true'
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_login_messages '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_login_mock_object(self, cx_type='zapi', kind=None, status=None):
+ banner_obj = messages_module()
+ netapp_utils.ems_log_event = Mock(return_value=None)
+ if cx_type == 'zapi':
+ if kind is None:
+ banner_obj.server = MockONTAPConnection()
+ else:
+ banner_obj.server = MockONTAPConnection(kind=kind, data=status)
+ return banner_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ messages_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_successfully_create_banner(self):
+ data = self.mock_args()
+ data['banner'] = 'test banner'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_login_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_banner_idempotency(self):
+ data = self.mock_args()
+ data['banner'] = 'test banner'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_login_mock_object('zapi', 'create_idempotency', data).apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_create_motd(self):
+ data = self.mock_args()
+ data['motd_message'] = 'test message'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_login_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_motd_idempotency(self):
+ data = self.mock_args()
+ data['motd_message'] = 'test message'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_login_mock_object('zapi', 'create_idempotency', data).apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_get_banner_error(self):
+ data = self.mock_args()
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_login_mock_object('zapi', 'error', data).apply()
+ assert exc.value.args[0]['msg'] == 'Error fetching login_banner info: NetApp API failed. Reason - test:expect error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_login_messages.NetAppOntapLoginMessages.get_banner_motd')
+ def test_modify_banner_error(self, get_info):
+ data = self.mock_args()
+ data['banner'] = 'modify to new banner'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ get_info.side_effect = [
+ {
+ 'banner': 'old banner',
+ 'motd': ''
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_login_mock_object('zapi', 'error', data).apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying login_banner: NetApp API failed. Reason - test:expect error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_login_messages.NetAppOntapLoginMessages.get_banner_motd')
+ def test_modify_motd_error(self, get_info):
+ data = self.mock_args()
+ data['motd_message'] = 'modify to new motd'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ get_info.side_effect = [
+ {
+ 'motd': 'old motd',
+ 'show_cluster_motd': False
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_login_mock_object('zapi', 'error', data).apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying motd: NetApp API failed. Reason - test:expect error'
+
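+ # For the REST variants below, send_request is mocked directly: its side_effect
+ # list is consumed in call order, so each list encodes the expected sequence of
+ # REST calls (availability probe, SVM lookup, get, patch).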
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successfully_create_banner_rest(self, mock_request):
+ data = self.mock_args()
+ data['banner'] = 'test banner'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_uuid'],
+ SRR['empty_good'], # get
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_login_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_banner_error_rest(self, mock_request):
+ data = self.mock_args()
+ data['banner'] = 'test banner'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_uuid'],
+ SRR['generic_error'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_login_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == 'Error when fetching login_banner info: Expected error'
+
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_uuid'],
+ SRR['empty_good'], # get
+ SRR['generic_error'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_login_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == 'Error when modifying banner: Expected error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py
new file mode 100644
index 00000000..07d3c2f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py
@@ -0,0 +1,177 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun \
+ import NetAppOntapLUN as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'lun':
+ xml = self.build_lun_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_lun_info(lun_name):
+ ''' build xml data for lun-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ lun = dict(
+ lun_info=dict(
+ path="/what/ever/%s" % lun_name,
+ size=10
+ )
+ )
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': [lun]
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_lun_args = {
+ 'vserver': 'ansible',
+ 'name': 'lun_name',
+ 'flexvol_name': 'vol_name',
+ 'state': 'present'
+ }
+
+ def mock_args(self):
+
+ return {
+ 'vserver': self.mock_lun_args['vserver'],
+ 'name': self.mock_lun_args['name'],
+ 'flexvol_name': self.mock_lun_args['flexvol_name'],
+ 'state': self.mock_lun_args['state'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+ # self.server = MockONTAPConnection()
+
+ def get_lun_mock_object(self, kind=None, parm1=None):
+ """
+ Helper method to return an na_ontap_lun object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_lun object
+ """
+ lun_obj = my_module()
+ lun_obj.autosupport_log = Mock(return_value=None)
+ lun_obj.server = MockONTAPConnection(kind=kind, parm1=parm1)
+ return lun_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if required param 'size' is not specified '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_lun_mock_object().apply()
+ msg = 'size is a required parameter for create.'
+ assert msg == exc.value.args[0]['msg']
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ data = dict(self.mock_args())
+ data['size'] = 5
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_rename_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object('lun', 'lun_name').apply()
+ assert not exc.value.args[0]['changed']
+
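+ # Passing 'lun_from_name' to the mock puts the old name in the canned lun-info
+ # path, so apply() is expected to take the rename path instead of creating.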
+ def test_successful_rename(self):
+ ''' Test successful rename '''
+ data = dict(self.mock_args())
+ data['from_name'] = 'lun_from_name'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object('lun', 'lun_from_name').apply()
+ assert exc.value.args[0]['changed']
+ assert 'renamed' in exc.value.args[0]
+
+ def test_failed_rename(self):
+ ''' Test failed rename '''
+ data = dict(self.mock_args())
+ data['from_name'] = 'lun_from_name'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_lun_mock_object('lun', 'other_lun_name').apply()
+ msg = 'Error renaming lun: lun_from_name does not exist'
+ assert msg == exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py
new file mode 100644
index 00000000..cafa9105
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py
@@ -0,0 +1,155 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_copy \
+ import NetAppOntapLUNCopy as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'destination_vserver':
+ xml = self.build_lun_info()
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_lun_info():
+ ''' build xml data for lun-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_lun_copy = {
+ 'source_vserver': 'ansible',
+ 'destination_path': '/vol/test/test_copy_dest_dest_new_reviewd_new',
+ 'source_path': '/vol/test/test_copy_1',
+ 'destination_vserver': 'ansible',
+ 'state': 'present'
+ }
+
+ def mock_args(self):
+
+ return {
+ 'source_vserver': self.mock_lun_copy['source_vserver'],
+ 'destination_path': self.mock_lun_copy['destination_path'],
+ 'source_path': self.mock_lun_copy['source_path'],
+ 'destination_vserver': self.mock_lun_copy['destination_vserver'],
+ 'state': self.mock_lun_copy['state'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+ # self.server = MockONTAPConnection()
+
+ def get_lun_copy_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_lun_copy object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_lun_copy object
+ """
+ lun_copy_obj = my_module()
+ lun_copy_obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ lun_copy_obj.server = MockONTAPConnection()
+ else:
+ lun_copy_obj.server = MockONTAPConnection(kind=kind)
+ return lun_copy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if required param 'destination_vserver' is not specified'''
+ data = self.mock_args()
+ del data['destination_vserver']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_lun_copy_mock_object('lun_copy').copy_lun()
+ msg = 'missing required arguments: destination_vserver'
+ assert msg == exc.value.args[0]['msg']
+
+ def test_successful_copy(self):
+ ''' Test successful copy '''
+ # data = self.mock_args()
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_copy_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_copy_idempotency(self):
+ ''' Test copy idempotency '''
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_copy_mock_object('destination_vserver').apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py
new file mode 100644
index 00000000..a904a516
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py
@@ -0,0 +1,192 @@
+''' unit tests for ONTAP Ansible module: na_ontap_lun_map '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map \
+ import NetAppOntapLUNMap as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'lun_map':
+ xml = self.build_lun_info()
+ elif self.type == 'lun_map_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_lun_info():
+ ''' build xml data for lun-map-entry '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'initiator-groups': [{'initiator-group-info': {'initiator-group-name': 'ansible', 'lun-id': 2}}]}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
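+ # onbox stays False so these tests run entirely against MockONTAPConnection;
+ # the first branch in set_default_args() only matters when pointing the test at
+ # a live array.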
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ initiator_group_name = 'ansible'
+ vserver = 'ansible'
+ path = '/vol/ansible/test'
+ lun_id = 2
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ initiator_group_name = 'ansible'
+ vserver = 'ansible'
+ path = '/vol/ansible/test'
+ lun_id = 2
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'initiator_group_name': initiator_group_name,
+ 'vserver': vserver,
+ 'path': path,
+ 'lun_id': lun_id
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_lun_map for non-existent lun'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_lun_map() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_lun_map for existing lun'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='lun_map')
+ assert my_obj.get_lun_map()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map.NetAppOntapLUNMap.create_lun_map')
+ def test_successful_create(self, create_lun_map):
+ ''' mapping lun and testing idempotency '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_lun_map.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('lun_map')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map.NetAppOntapLUNMap.delete_lun_map')
+ def test_successful_delete(self, delete_lun_map):
+ ''' unmapping lun and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('lun_map')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_lun_map.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('lun_map_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_lun_map()
+ assert 'Error mapping lun' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_lun_map()
+ assert 'Error unmapping lun' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py
new file mode 100644
index 00000000..9ed07535
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py
@@ -0,0 +1,277 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun \
+ import NetAppOntapLUN as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_apps_empty': (200,
+ {'records': [],
+ 'num_records': 0
+ },
+ None
+ ),
+ 'get_apps_found': (200,
+ {'records': [dict(name='san_appli', uuid='1234')],
+ 'num_records': 1
+ },
+ None
+ ),
+ 'get_app_components': (200,
+ {'records': [dict(name='san_appli', uuid='1234')],
+ 'num_records': 1
+ },
+ None
+ ),
+ 'get_app_component_details': (200,
+ {'backing_storage': dict(luns=[]),
+ },
+ None
+ ),
+}
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'lun':
+ xml = self.build_lun_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_lun_info(lun_name):
+ ''' build xml data for lun-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ lun = dict(
+ lun_info=dict(
+ path="/what/ever/%s" % lun_name,
+ size=10
+ )
+ )
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': [lun]
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_lun_args = {
+ 'vserver': 'ansible',
+ 'name': 'lun_name',
+ 'flexvol_name': 'vol_name',
+ 'state': 'present'
+ }
+
+ def mock_args(self):
+ return {
+ 'vserver': self.mock_lun_args['vserver'],
+ 'name': self.mock_lun_args['name'],
+ 'flexvol_name': self.mock_lun_args['flexvol_name'],
+ 'state': self.mock_lun_args['state'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+ # self.server = MockONTAPConnection()
+
+ def get_lun_mock_object(self, kind=None, parm1=None):
+ """
+ Helper method to return an na_ontap_lun object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_lun object
+ """
+ lun_obj = my_module()
+ lun_obj.autosupport_log = Mock(return_value=None)
+ lun_obj.server = MockONTAPConnection(kind=kind, parm1=parm1)
+ return lun_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if required param 'size' is not specified '''
+ data = self.mock_args()
+ set_module_args(data)
+ data.pop('flexvol_name')
+ data['san_application_template'] = dict(name='san_appli')
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_lun_mock_object().apply()
+ msg = 'size is a required parameter for create.'
+ assert msg == exc.value.args[0]['msg']
+
+ def test_create_error_missing_param2(self):
+ ''' Test if create throws an error if 'name' is missing from san_application_template '''
+ data = self.mock_args()
+ data.pop('flexvol_name')
+ data['size'] = 5
+ data['san_application_template'] = dict(lun_count=6)
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_lun_mock_object().apply()
+ msg = 'missing required arguments: name found in san_application_template'
+ assert msg == exc.value.args[0]['msg']
+
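+ # The application tests below drop flexvol_name and supply
+ # san_application_template, which is expected to steer the module to the REST
+ # application/applications endpoints mocked via send_request.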
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_create_appli(self, mock_request):
+ ''' Test successful create '''
+ mock_request.side_effect = [
+ SRR['get_apps_empty'], # GET application/applications
+ SRR['empty_good'], # POST application/applications
+ SRR['end_of_sequence']
+ ]
+ data = dict(self.mock_args())
+ data['size'] = 5
+ data.pop('flexvol_name')
+ data['san_application_template'] = dict(name='san_appli')
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_create_appli_idem(self, mock_request):
+ ''' Test successful create idempotency '''
+ mock_request.side_effect = [
+ SRR['get_apps_found'], # GET application/applications
+ SRR['get_apps_found'], # GET application/applications/<uuid>/components
+ SRR['get_app_component_details'], # GET application/applications/<uuid>/components/<cuuid>
+ SRR['end_of_sequence']
+ ]
+ data = dict(self.mock_args())
+ data['size'] = 5
+ data.pop('flexvol_name')
+ data['san_application_template'] = dict(name='san_appli')
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_create_appli_idem_no_comp(self, mock_request):
+ ''' Test successful create idempotency '''
+ mock_request.side_effect = [
+ SRR['get_apps_found'], # GET application/applications
+ SRR['get_apps_empty'], # GET application/applications/<uuid>/components
+ SRR['end_of_sequence']
+ ]
+ data = dict(self.mock_args())
+ data['size'] = 5
+ data.pop('flexvol_name')
+ data['san_application_template'] = dict(name='san_appli')
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_lun_mock_object().apply()
+ msg = 'Error: no component for application san_appli'
+ assert msg == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_delete_appli(self, mock_request):
+ ''' Test successful delete '''
+ mock_request.side_effect = [
+ SRR['get_apps_found'], # GET application/applications
+ SRR['empty_good'], # POST application/applications
+ SRR['end_of_sequence']
+ ]
+ data = dict(self.mock_args())
+ data['size'] = 5
+ data.pop('flexvol_name')
+ data['san_application_template'] = dict(name='san_appli')
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_delete_appli_idem(self, mock_request):
+ ''' Test successful delete idempotency '''
+ mock_request.side_effect = [
+ SRR['get_apps_empty'], # GET application/applications
+ SRR['end_of_sequence']
+ ]
+ data = dict(self.mock_args())
+ data['size'] = 5
+ data.pop('flexvol_name')
+ data['san_application_template'] = dict(name='san_appli')
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_lun_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py
new file mode 100644
index 00000000..33da1819
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py
@@ -0,0 +1,156 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_mcc_mediator '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_mcc_mediator \
+ import NetAppOntapMccipMediator as mediator_module # module under test
+
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_mediator_with_no_results': (200, {'num_records': 0}, None),
+ 'get_mediator_with_results': (200, {
+ 'num_records': 1,
+ 'records': [{
+ 'ip_address': '10.10.10.10',
+ 'uuid': 'ebe27c49-1adf-4496-8335-ab862aebebf2'
+ }]
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ """ Unit tests for na_ontap_metrocluster """
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_mediator = {
+ 'mediator_address': '10.10.10.10',
+ 'mediator_user': 'carchi',
+ 'mediator_password': 'netapp1!'
+ }
+
+ def mock_args(self):
+ return {
+ 'mediator_address': self.mock_mediator['mediator_address'],
+ 'mediator_user': self.mock_mediator['mediator_user'],
+ 'mediator_password': self.mock_mediator['mediator_password'],
+ 'hostname': 'test_host',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_alias_mock_object(self):
+ alias_obj = mediator_module()
+ return alias_obj
+
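+ # Each side_effect sequence starts with SRR['is_rest'], presumably consumed by
+ # the REST availability check in module_utils before the module's own calls.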
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ """Test successful rest create"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mediator_with_no_results'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create_idempotency(self, mock_request):
+ """Test successful rest create"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mediator_with_results'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete(self, mock_request):
+ """Test successful rest create"""
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mediator_with_results'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete_idempotency(self, mock_request):
+ """Test rest delete idempotency"""
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mediator_with_no_results'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py
new file mode 100644
index 00000000..169ab9c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py
@@ -0,0 +1,149 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_metrocluster '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_metrocluster \
+ import NetAppONTAPMetroCluster as metrocluster_module # module under test
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_metrocluster_with_results': (200, {"local": {
+ "cluster": {
+ 'name': 'cluster1'
+ },
+ "configuration_state": "configuration_error", # TODO: put correct state
+ "partner_cluster_reachable": "true",
+ }}, None),
+ 'get_metrocluster_with_no_results': (200, None, None),
+ 'metrocluster_post': (200, {'job': {
+ 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7',
+ '_links': {
+ 'self': {
+ 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}}}
+ }, None),
+ 'job': (200, {
+ "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14",
+ "description": "POST /api/cluster/metrocluster",
+ "state": "success",
+ "message": "There are not enough disks in Pool1.",
+ "code": 2432836,
+ "start_time": "2020-02-26T10:35:44-08:00",
+ "end_time": "2020-02-26T10:47:38-08:00",
+ "_links": {
+ "self": {
+ "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14"
+ }
+ }
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ """ Unit tests for na_ontap_metrocluster """
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_metrocluster = {
+ 'partner_cluster_name': 'cluster1',
+ 'node_name': 'carchi_vsim1',
+ 'partner_node_name': 'carchi_vsim3'
+ }
+
+ def mock_args(self):
+ return {
+ 'dr_pairs': [{
+ 'node_name': self.mock_metrocluster['node_name'],
+ 'partner_node_name': self.mock_metrocluster['partner_node_name'],
+ }],
+ 'partner_cluster_name': self.mock_metrocluster['partner_cluster_name'],
+ 'hostname': 'test_host',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_alias_mock_object(self):
+ alias_obj = metrocluster_module()
+ return alias_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ """Test successful rest create"""
+ data = self.mock_args()
+ set_module_args(data)
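+        # canned responses in expected call order: REST detection, GET (no metrocluster), POST, then job polling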
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_metrocluster_with_no_results'],
+ SRR['metrocluster_post'],
+ SRR['job'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ """Test rest create idempotency"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_metrocluster_with_results'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py
new file mode 100644
index 00000000..df349da4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py
@@ -0,0 +1,196 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_metrocluster_dr_group '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_metrocluster_dr_group \
+ import NetAppONTAPMetroClusterDRGroup as mcc_dr_pairs_module # module under test
+
+# REST API canned responses when mocking send_request
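+# the GET responses below model one existing DR group (id 2, two node pairs) and the empty case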
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_mcc_dr_pair_with_no_results': (200, {'records': [], 'num_records': 0}, None),
+ 'get_mcc_dr_pair_with_results': (200, {'records': [{'partner_cluster': {'name': 'rha2-b2b1_siteB'},
+ 'dr_pairs': [{'node': {'name': 'rha17-a2'},
+ 'partner': {'name': 'rha17-b2'}},
+ {'node': {'name': 'rha17-b2'},
+ 'partner': {'name': 'rha17-b1'}}],
+ 'id': '2'}],
+ 'num_records': 1}, None),
+ 'mcc_dr_pair_post': (200, {'job': {
+ 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7',
+ '_links': {
+ 'self': {
+ 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}}}
+ }, None),
+ 'get_mcc_dr_node': (200, {'records': [{'dr_group_id': '1'}], 'num_records': 1}, None),
+ 'get_mcc_dr_node_none': (200, {'records': [], 'num_records': 0}, None),
+ 'job': (200, {
+ "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14",
+ "description": "POST /api/cluster/metrocluster",
+ "state": "success",
+ "message": "There are not enough disks in Pool1.",
+ "code": 2432836,
+ "start_time": "2020-02-26T10:35:44-08:00",
+ "end_time": "2020-02-26T10:47:38-08:00",
+ "_links": {
+ "self": {
+ "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14"
+ }
+ }
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ """ Unit tests for na_ontap_metrocluster """
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_mcc_dr_pair = {
+ 'partner_cluster_name': 'rha2-b2b1_siteB',
+ 'node_name': 'rha17-a2',
+ 'partner_node_name': 'rha17-b2',
+ 'node_name2': 'rha17-b2',
+ 'partner_node_name2': 'rha17-b1'
+
+ }
+
+ def mock_args(self):
+ return {
+ 'dr_pairs': [{
+ 'node_name': self.mock_mcc_dr_pair['node_name'],
+ 'partner_node_name': self.mock_mcc_dr_pair['partner_node_name'],
+ }, {
+ 'node_name': self.mock_mcc_dr_pair['node_name2'],
+ 'partner_node_name': self.mock_mcc_dr_pair['partner_node_name2'],
+ }],
+ 'partner_cluster_name': self.mock_mcc_dr_pair['partner_cluster_name'],
+ 'hostname': 'test_host',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_alias_mock_object(self):
+ alias_obj = mcc_dr_pairs_module()
+ return alias_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ """Test successful rest create"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mcc_dr_pair_with_no_results'],
+ SRR['get_mcc_dr_pair_with_no_results'],
+ SRR['mcc_dr_pair_post'],
+ SRR['job'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ """Test rest create idempotency"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mcc_dr_pair_with_results'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete(self, mock_request):
+ """Test successful rest delete"""
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mcc_dr_pair_with_results'],
+ SRR['mcc_dr_pair_post'],
+ SRR['job'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_delete_idempotency(self, mock_request):
+ """Test rest delete idempotency"""
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_mcc_dr_pair_with_no_results'],
+ SRR['get_mcc_dr_pair_with_no_results'],
+ SRR['get_mcc_dr_node_none'],
+ SRR['get_mcc_dr_node_none'],
+ SRR['job'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py
new file mode 100644
index 00000000..5522986d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py
@@ -0,0 +1,182 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for Ansible module: na_ontap_motd """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_motd \
+ import NetAppONTAPMotd as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'motd':
+ xml = self.build_motd_info()
+ elif self.type == 'motd_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_motd_info():
+ ''' build xml data for motd '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'vserver-motd-info': {'message': 'ansible',
+ 'vserver': 'ansible',
+ 'is-cluster-message-enabled': 'true'}}}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ # whether to use a mock or a simulator
+ self.onbox = False
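+        # flip onbox to True to exercise the module against a live system using the credentials in set_default_args()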
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ message = 'ansible'
+ vserver = 'ansible'
+ show_cluster_motd = 'true'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ message = 'ansible'
+ vserver = 'ansible'
+ show_cluster_motd = 'true'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'message': message,
+ 'vserver': vserver,
+ 'show_cluster_motd': show_cluster_motd
+ })
+
+ def call_command(self, module_args):
+ ''' utility function to call apply '''
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('motd')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ return exc.value.args[0]['changed']
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_motd_get_called(self):
+ ''' fetching details of motd '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.motd_get() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test for existing motd'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='motd')
+ assert my_obj.motd_get()
+
+ def test_motd_create(self):
+        ''' test create idempotency (motd already present) '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection(kind='motd')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_motd_delete(self):
+ ''' test for deleting motd'''
+ module_args = {
+ 'state': 'absent',
+ }
+ changed = self.call_command(module_args)
+ assert changed
+
+ def test_if_all_methods_catch_exception(self):
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('motd_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.motd_get()
+ assert '' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_motd()
+ assert 'Error creating motd: ' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py
new file mode 100644
index 00000000..d6b0ce72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py
@@ -0,0 +1,180 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_name_service_switch '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_name_service_switch \
+ import NetAppONTAPNsswitch as nss_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'nss':
+ xml = self.build_nss_info(self.params)
+ if self.kind == 'error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_nss_info(nss_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'namservice-nsswitch-config-info': {
+ 'nameservice-database': nss_details['database_type'],
+ 'nameservice-sources': {
+ 'nss-source-type': nss_details['sources']
+ }
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_name_service_switch '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_nss = {
+ 'state': 'present',
+ 'vserver': 'test_vserver',
+ 'database_type': 'namemap',
+ 'sources': 'files,ldap',
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_nss['state'],
+ 'vserver': self.mock_nss['vserver'],
+ 'database_type': self.mock_nss['database_type'],
+ 'sources': self.mock_nss['sources'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_nss_object(self, kind=None):
+ nss_obj = nss_module()
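+        # stub autosupport logging so the only ZAPI traffic goes through the mocked connection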
+ nss_obj.asup_log_for_cserver = Mock(return_value=None)
+ if kind is None:
+ nss_obj.server = MockONTAPConnection()
+ else:
+ nss_obj.server = MockONTAPConnection(kind=kind, data=self.mock_nss)
+ return nss_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ nss_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_nss(self):
+ set_module_args(self.mock_args())
+ result = self.get_nss_object().get_name_service_switch()
+ assert result is None
+
+ def test_get_existing_nss(self):
+ set_module_args(self.mock_args())
+ result = self.get_nss_object('nss').get_name_service_switch()
+ assert result
+
+ def test_successfully_create(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_nss_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successfully_modify(self):
+ data = self.mock_args()
+ data['sources'] = 'files'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_nss_object('nss').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_nss_object('nss').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error(self):
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_nss_object('error').create_name_service_switch()
+ print(exc)
+ assert exc.value.args[0]['msg'] == 'Error on creating name service switch config on vserver test_vserver: NetApp API failed. Reason - test:error'
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_nss_object('error').modify_name_service_switch({})
+ print(exc)
+ assert exc.value.args[0]['msg'] == 'Error on modifying name service switch config on vserver test_vserver: NetApp API failed. Reason - test:error'
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_nss_object('error').delete_name_service_switch()
+ print(exc)
+ assert exc.value.args[0]['msg'] == 'Error on deleting name service switch config on vserver test_vserver: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py
new file mode 100644
index 00000000..6fc9b89e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py
@@ -0,0 +1,227 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_ndmp '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ndmp \
+ import NetAppONTAPNdmp as ndmp_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'get_uuid': (200, {'records': [{'uuid': 'testuuid'}]}, None),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, 'Error fetching ndmp from ansible: NetApp API failed. Reason - Unexpected error:',
+ "REST API currently does not support 'backup_log_enable, ignore_ctime_enabled'"),
+ 'get_ndmp_uuid': (200, {"records": [{"svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}}]}, None),
+ 'get_ndmp': (200, {"enabled": True, "authentication_types": ["test"],
+ "records": [{"svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}}]}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'ndmp':
+ xml = self.build_ndmp_info(self.data)
+ if self.type == 'error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_ndmp_info(ndmp_details):
+ ''' build xml data for ndmp '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'ndmp-vserver-attributes-info': {
+ 'ignore_ctime_enabled': ndmp_details['ignore_ctime_enabled'],
+ 'backup_log_enable': ndmp_details['backup_log_enable'],
+
+ 'authtype': [
+ {'ndmpd-authtypes': 'plaintext'},
+ {'ndmpd-authtypes': 'challenge'}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_ndmp = {
+ 'ignore_ctime_enabled': True,
+ 'backup_log_enable': 'false',
+ 'authtype': 'plaintext',
+ 'enable': True
+ }
+
+ def mock_args(self, rest=False):
+ if rest:
+ return {
+ 'authtype': self.mock_ndmp['authtype'],
+ 'enable': True,
+ 'vserver': 'ansible',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+ else:
+ return {
+ 'vserver': 'ansible',
+ 'authtype': self.mock_ndmp['authtype'],
+ 'ignore_ctime_enabled': self.mock_ndmp['ignore_ctime_enabled'],
+ 'backup_log_enable': self.mock_ndmp['backup_log_enable'],
+ 'enable': self.mock_ndmp['enable'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_ndmp_mock_object(self, kind=None, cx_type='zapi'):
+ """
+ Helper method to return an na_ontap_ndmp object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_ndmp object
+ """
+ obj = ndmp_module()
+ if cx_type == 'zapi':
+ obj.asup_log_for_cserver = Mock(return_value=None)
+ obj.server = Mock()
+ obj.server.invoke_successfully = Mock()
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ obj.server = MockONTAPConnection(kind=kind, data=self.mock_ndmp)
+ return obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ndmp.NetAppONTAPNdmp.ndmp_get_iter')
+    def test_successful_modify(self, get_ndmp):
+ ''' Test successful modify ndmp'''
+ data = self.mock_args()
+ set_module_args(data)
+ current = {
+ 'ignore_ctime_enabled': False,
+ 'backup_log_enable': True
+ }
+        get_ndmp.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ndmp_mock_object('ndmp').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ndmp.NetAppONTAPNdmp.ndmp_get_iter')
+    def test_modify_error(self, get_ndmp):
+ ''' Test modify error '''
+ data = self.mock_args()
+ set_module_args(data)
+ current = {
+ 'ignore_ctime_enabled': False,
+ 'backup_log_enable': True
+ }
+        get_ndmp.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ndmp_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying ndmp on ansible: NetApp API failed. Reason - test:error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.mock_args()
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ndmp_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][3]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_modify(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ mock_request.side_effect = [
+ # SRR['is_rest'], # WHY IS IT NOT CALLED HERE?
+ SRR['get_ndmp_uuid'], # for get svm uuid: protocols/ndmp/svms
+ SRR['get_ndmp'], # for get ndmp details: '/protocols/ndmp/svms/' + uuid
+ SRR['get_ndmp_uuid'], # for get svm uuid: protocols/ndmp/svms (before modify)
+ SRR['empty_good'], # modify (patch)
+ SRR['end_of_sequence'],
+ ]
+ my_obj = ndmp_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py
new file mode 100644
index 00000000..f849c35a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py
@@ -0,0 +1,299 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_net_ifgrp '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp \
+ import NetAppOntapIfGrp as ifgrp_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'ifgrp':
+ xml = self.build_ifgrp_info(self.params)
+ elif self.kind == 'ifgrp-ports':
+ xml = self.build_ifgrp_ports_info(self.params)
+ elif self.kind == 'ifgrp-fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_ifgrp_info(ifgrp_details):
+ ''' build xml data for ifgrp-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'net-port-info': {
+ 'port': ifgrp_details['name'],
+ 'ifgrp-distribution-function': 'mac',
+ 'ifgrp-mode': ifgrp_details['mode'],
+ 'node': ifgrp_details['node']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_ifgrp_ports_info(data):
+ ''' build xml data for ifgrp-ports '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes': {
+ 'net-ifgrp-info': {
+ 'ports': [
+ {'lif-bindable': data['ports'][0]},
+ {'lif-bindable': data['ports'][1]},
+ {'lif-bindable': data['ports'][2]}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_ifgrp = {
+ 'name': 'test',
+ 'port': 'a1',
+ 'node': 'test_vserver',
+ 'mode': 'something'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_ifgrp['name'],
+ 'distribution_function': 'mac',
+ 'ports': [self.mock_ifgrp['port']],
+ 'node': self.mock_ifgrp['node'],
+ 'mode': self.mock_ifgrp['mode'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_ifgrp_mock_object(self, kind=None, data=None):
+ """
+ Helper method to return an na_ontap_net_ifgrp object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_net_ifgrp object
+ """
+ obj = ifgrp_module()
+ obj.autosupport_log = Mock(return_value=None)
+ if data is None:
+ data = self.mock_ifgrp
+ obj.server = MockONTAPConnection(kind=kind, data=data)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ ifgrp_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_ifgrp(self):
+ ''' Test if get_ifgrp returns None for non-existent ifgrp '''
+ set_module_args(self.mock_args())
+ result = self.get_ifgrp_mock_object().get_if_grp()
+ assert result is None
+
+ def test_get_existing_ifgrp(self):
+ ''' Test if get_ifgrp returns details for existing ifgrp '''
+ set_module_args(self.mock_args())
+ result = self.get_ifgrp_mock_object('ifgrp').get_if_grp()
+ assert result['name'] == self.mock_ifgrp['name']
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ifgrp_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_delete(self):
+        ''' Test delete existing ifgrp '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+        ''' Test modify ports of existing ifgrp '''
+ data = self.mock_args()
+ data['ports'] = ['1', '2', '3']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.get_if_grp')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.create_if_grp')
+ def test_create_called(self, create_ifgrp, get_ifgrp):
+ data = self.mock_args()
+ set_module_args(data)
+ get_ifgrp.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ifgrp_mock_object().apply()
+ get_ifgrp.assert_called_with()
+ create_ifgrp.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.add_port_to_if_grp')
+ def test_if_ports_are_added_after_create(self, add_ports):
+        ''' Test ports are added after ifgrp create '''
+ data = self.mock_args()
+ set_module_args(data)
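+        # create_if_grp() is expected to add each configured port ('a1') right after creating the group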
+ self.get_ifgrp_mock_object().create_if_grp()
+ add_ports.assert_called_with('a1')
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.get_if_grp')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.delete_if_grp')
+ def test_delete_called(self, delete_ifgrp, get_ifgrp):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ get_ifgrp.return_value = Mock()
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ifgrp_mock_object().apply()
+ get_ifgrp.assert_called_with()
+ delete_ifgrp.assert_called_with()
+
+ def test_get_return_value(self):
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_ifgrp_mock_object('ifgrp').get_if_grp()
+ assert result['name'] == data['name']
+ assert result['mode'] == data['mode']
+ assert result['node'] == data['node']
+
+ def test_get_ports_list(self):
+ data = self.mock_args()
+ data['ports'] = ['e0a', 'e0b', 'e0c']
+ set_module_args(data)
+ result = self.get_ifgrp_mock_object('ifgrp-ports', data).get_if_grp_ports()
+ assert result['ports'] == data['ports']
+
+ def test_add_port_packet(self):
+ data = self.mock_args()
+ set_module_args(data)
+ obj = self.get_ifgrp_mock_object('ifgrp')
+ obj.add_port_to_if_grp('addme')
+ assert obj.server.xml_in['port'] == 'addme'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.remove_port_to_if_grp')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.add_port_to_if_grp')
+ def test_modify_ports_calls_remove_existing_ports(self, add_port, remove_port):
+ ''' Test if already existing ports are not being added again '''
+ data = self.mock_args()
+ data['ports'] = ['1', '2']
+ set_module_args(data)
+ self.get_ifgrp_mock_object('ifgrp').modify_ports(current_ports=['1', '2', '3'])
+ assert remove_port.call_count == 1
+ assert add_port.call_count == 0
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.remove_port_to_if_grp')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.add_port_to_if_grp')
+ def test_modify_ports_calls_add_new_ports(self, add_port, remove_port):
+ ''' Test new ports are added '''
+ data = self.mock_args()
+ data['ports'] = ['1', '2', '3', '4']
+ set_module_args(data)
+ self.get_ifgrp_mock_object('ifgrp').modify_ports(current_ports=['1', '2'])
+ assert remove_port.call_count == 0
+ assert add_port.call_count == 2
+
+ def test_get_ports_returns_none(self):
+ set_module_args(self.mock_args())
+ result = self.get_ifgrp_mock_object().get_if_grp_ports()
+ assert result['ports'] == []
+ result = self.get_ifgrp_mock_object().get_if_grp()
+ assert result is None
+
+ def test_if_all_methods_catch_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp-fail').get_if_grp()
+ assert 'Error getting if_group test' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp-fail').create_if_grp()
+ assert 'Error creating if_group test' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp-fail').get_if_grp_ports()
+ assert 'Error getting if_group ports test' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp-fail').add_port_to_if_grp('test-port')
+ assert 'Error adding port test-port to if_group test' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp-fail').remove_port_to_if_grp('test-port')
+ assert 'Error removing port test-port to if_group test' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_ifgrp_mock_object('ifgrp-fail').delete_if_grp()
+ assert 'Error deleting if_group test' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py
new file mode 100644
index 00000000..7c16c243
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py
@@ -0,0 +1,180 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_net_port '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_port \
+ import NetAppOntapNetPort as port_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'port':
+ xml = self.build_port_info(self.data)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_port_info(port_details):
+ ''' build xml data for net-port-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'net-port-info': {
+ # 'port': port_details['port'],
+ 'mtu': port_details['mtu'],
+ 'is-administrative-auto-negotiate': 'true',
+ 'ipspace': 'default',
+ 'administrative-flowcontrol': port_details['flowcontrol_admin'],
+ 'node': port_details['node']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_port = {
+ 'node': 'test',
+ 'ports': 'a1',
+ 'flowcontrol_admin': 'something',
+ 'mtu': '1000'
+ }
+
+ def mock_args(self):
+ return {
+ 'node': self.mock_port['node'],
+ 'flowcontrol_admin': self.mock_port['flowcontrol_admin'],
+ 'ports': [self.mock_port['ports']],
+ 'mtu': self.mock_port['mtu'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_port_mock_object(self, kind=None, data=None):
+ """
+ Helper method to return an na_ontap_net_port object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_net_port object
+ """
+ obj = port_module()
+ obj.autosupport_log = Mock(return_value=None)
+ if data is None:
+ data = self.mock_port
+ obj.server = MockONTAPConnection(kind=kind, data=data)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ port_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_port(self):
+ ''' Test if get_net_port returns None for non-existent port '''
+ set_module_args(self.mock_args())
+ result = self.get_port_mock_object().get_net_port('test')
+ assert result is None
+
+ def test_get_existing_port(self):
+ ''' Test if get_net_port returns details for existing port '''
+ set_module_args(self.mock_args())
+ result = self.get_port_mock_object('port').get_net_port('test')
+ assert result['mtu'] == self.mock_port['mtu']
+ assert result['flowcontrol_admin'] == self.mock_port['flowcontrol_admin']
+
+ def test_successful_modify(self):
+ ''' Test modify_net_port '''
+ data = self.mock_args()
+ data['mtu'] = '2000'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object('port').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_multiple_ports(self):
+ ''' Test modify_net_port '''
+ data = self.mock_args()
+ data['ports'] = ['a1', 'a2']
+ data['mtu'] = '2000'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object('port').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_port.NetAppOntapNetPort.get_net_port')
+ def test_get_called(self, get_port):
+ ''' Test get_net_port '''
+ data = self.mock_args()
+ data['ports'] = ['a1', 'a2']
+ set_module_args(data)
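+        # apply() looks up each entry in 'ports' once, hence the expected call count of 2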
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object('port').apply()
+ assert get_port.call_count == 2
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py
new file mode 100644
index 00000000..ab7d57bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py
@@ -0,0 +1,461 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_net_routes '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes \
+ import NetAppOntapNetRoutes as net_route_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'net_routes_record': (200,
+ {'records': [{"destination": {"address": "176.0.0.0",
+ "netmask": "24",
+ "family": "ipv4"},
+ "gateway": '10.193.72.1',
+ "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412',
+ "svm": {"name": "test_vserver"}}]}, None),
+ 'modified_record': (200,
+ {'records': [{"destination": {"address": "0.0.0.0",
+ "netmask": "0",
+ "family": "ipv4"},
+ "gateway": "10.193.72.1",
+ "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412',
+ "svm": {"name": "test_vserver"}}]}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'net_route':
+ xml = self.build_net_route_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_net_route_info(net_route_details):
+ ''' build xml data for net_route-attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes': {
+ 'net-vs-routes-info': {
+ 'address-family': 'ipv4',
+ 'destination': net_route_details['destination'],
+ 'gateway': net_route_details['gateway'],
+ 'metric': net_route_details['metric'],
+ 'vserver': net_route_details['vserver']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_net_route = {
+ 'destination': '176.0.0.0/24',
+ 'gateway': '10.193.72.1',
+ 'vserver': 'test_vserver',
+ 'metric': 70
+ }
+
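+    # mock_args builds three variants: plain ZAPI args, REST args (no metric), and a modify via from_* options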
+ def mock_args(self, rest=False, modify=False):
+ if rest:
+ return {
+ 'vserver': self.mock_net_route['vserver'],
+ 'destination': self.mock_net_route['destination'],
+ 'gateway': self.mock_net_route['gateway'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ elif modify:
+ return {
+ 'vserver': self.mock_net_route['vserver'],
+ 'destination': '0.0.0.0/0',
+ 'gateway': '10.193.72.1',
+ 'from_destination': self.mock_net_route['destination'],
+ 'from_gateway': self.mock_net_route['gateway'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ else:
+ return {
+ 'vserver': self.mock_net_route['vserver'],
+ 'destination': self.mock_net_route['destination'],
+ 'gateway': self.mock_net_route['gateway'],
+ 'metric': self.mock_net_route['metric'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_net_route_mock_object(self, kind=None, data=None, cx_type='zapi'):
+ """
+ Helper method to return an na_ontap_net_route object
+ :param kind: passes this param to MockONTAPConnection()
+ :param data: passes this data to MockONTAPConnection()
+ :param type: differentiates zapi and rest procedure
+ :return: na_ontap_net_route object
+ """
+ net_route_obj = net_route_module()
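+        # for ZAPI tests, EMS logging and the cluster connection are stubbed so only the mocked server is exercised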
+ if cx_type == 'zapi':
+ net_route_obj.ems_log_event = Mock(return_value=None)
+ net_route_obj.cluster = Mock()
+ net_route_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ net_route_obj.server = MockONTAPConnection()
+ else:
+ if data is None:
+ net_route_obj.server = MockONTAPConnection(kind='net_route', data=self.mock_net_route)
+ else:
+ net_route_obj.server = MockONTAPConnection(kind='net_route', data=data)
+ return net_route_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ net_route_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_net_route(self):
+ ''' Test if get_net_route returns None for non-existent net_route '''
+ set_module_args(self.mock_args())
+ result = self.get_net_route_mock_object().get_net_route()
+ assert result is None
+
+    def test_get_existing_net_route(self):
+ ''' Test if get_net_route returns details for existing net_route '''
+ set_module_args(self.mock_args())
+ result = self.get_net_route_mock_object('net_route').get_net_route()
+ assert result['destination'] == self.mock_net_route['destination']
+ assert result['gateway'] == self.mock_net_route['gateway']
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if destination is not specified'''
+ data = self.mock_args()
+ del data['destination']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_net_route_mock_object('net_route').create_net_route()
+ msg = 'missing required arguments: destination'
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes.NetAppOntapNetRoutes.create_net_route')
+ def test_successful_create(self, create_net_route):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ create_net_route.assert_called_with()
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ obj = self.get_net_route_mock_object('net_route')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete(self):
+ ''' Test successful delete '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object('net_route').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify_metric(self):
+ ''' Test successful modify metric '''
+ data = self.mock_args()
+ del data['metric']
+ data['from_metric'] = 70
+ data['metric'] = 40
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object('net_route').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_metric_idempotency(self):
+ ''' Test modify metric idempotency'''
+ data = self.mock_args()
+ data['metric'] = 70
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object('net_route').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes.NetAppOntapNetRoutes.get_net_route')
+ def test_successful_modify_gateway(self, get_net_route):
+ ''' Test successful modify gateway '''
+ data = self.mock_args()
+ del data['gateway']
+ data['from_gateway'] = '10.193.72.1'
+ data['gateway'] = '10.193.0.1'
+ set_module_args(data)
+ current = {
+ 'destination': '176.0.0.0/24',
+ 'gateway': '10.193.72.1',
+ 'metric': 70,
+ 'vserver': 'test_server'
+ }
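+        # the patched get_net_route is consumed in order: the lookup for the desired route
+        # returns None, while the lookup for the from_gateway route returns the existing
+        # record, so apply() should report a change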
+ get_net_route.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes.NetAppOntapNetRoutes.get_net_route')
+    def test_modify_gateway_idempotency(self, get_net_route):
+ ''' Test modify gateway idempotency '''
+ data = self.mock_args()
+ del data['gateway']
+ data['from_gateway'] = '10.193.72.1'
+ data['gateway'] = '10.193.0.1'
+ set_module_args(data)
+ current = {
+ 'destination': '178.0.0.1/24',
+ 'gateway': '10.193.72.1',
+ 'metric': 70,
+ 'vserver': 'test_server'
+ }
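+        # here the first lookup (for the desired route) already returns a record, so apply()
+        # should report no change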
+ get_net_route.side_effect = [
+ current,
+ None
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object('net_route', current).apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes.NetAppOntapNetRoutes.get_net_route')
+ def test_successful_modify_destination(self, get_net_route):
+ ''' Test successful modify destination '''
+ data = self.mock_args()
+ del data['destination']
+ data['from_destination'] = '176.0.0.0/24'
+ data['destination'] = '178.0.0.1/24'
+ set_module_args(data)
+ current = {
+ 'destination': '176.0.0.0/24',
+ 'gateway': '10.193.72.1',
+ 'metric': 70,
+ 'vserver': 'test_server'
+ }
+ get_net_route.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes.NetAppOntapNetRoutes.get_net_route')
+    def test_modify_destination_idempotency(self, get_net_route):
+        ''' Test modify destination idempotency '''
+ data = self.mock_args()
+ del data['destination']
+ data['from_destination'] = '176.0.0.0/24'
+ data['destination'] = '178.0.0.1/24'
+ set_module_args(data)
+ current = {
+ 'destination': '178.0.0.1/24',
+ 'gateway': '10.193.72.1',
+ 'metric': 70,
+ 'vserver': 'test_server'
+ }
+ get_net_route.side_effect = [
+ current,
+ None
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object('net_route', current).apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_create(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+    def test_rest_idempotent_create(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['net_routes_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_destroy(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['net_routes_record'], # get
+ SRR['empty_good'], # delete
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotently_destroy(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_modify(self, mock_request):
+ data = self.mock_args(modify=True)
+ data['state'] = 'present'
+ set_module_args(data)
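+        # net routes are typically replaced rather than patched in place, so a modify is
+        # expected to be implemented as delete + re-create; hence the delete and post
+        # responses at the end of the mocked sequence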
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['net_routes_record'], # get
+ SRR['net_routes_record'], # get
+ SRR['empty_good'], # get
+ SRR['empty_good'], # delete
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotently_modify(self, mock_request):
+ data = self.mock_args(modify=True)
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['modified_record'], # get
+ SRR['modified_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_net_route_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py
new file mode 100644
index 00000000..80d27e1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py
@@ -0,0 +1,265 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_net_subnet '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_subnet \
+ import NetAppOntapSubnet as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if xml.get_child_by_name('query') is not None and \
+ xml.get_child_by_name('query').get_child_by_name('vserver-info') is not None:
+            # assume this is a cserver request
+ xml = self.build_cserver_info()
+ elif self.type == 'subnet':
+ if xml.get_child_by_name('query'):
+ name_obj = xml.get_child_by_name('query').get_child_by_name('net-subnet-info').get_child_by_name('subnet-name')
+ xml_name = name_obj.get_content()
+ if xml_name == self.params.get('name'):
+ xml = self.build_subnet_info(self.params)
+ elif self.type == 'subnet_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_cserver_info():
+ ''' build xml data for vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'vserver-info': {
+ 'vserver-name': 'cserver',
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_subnet_info(data):
+ ''' build xml data for subnet-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ ip_ranges = []
+ for elem in data['ip_ranges']:
+ ip_ranges.append({'ip-range': elem})
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'net-subnet-info': {
+ 'broadcast-domain': data['broadcast_domain'],
+ 'gateway': data['gateway'],
+ 'ip-ranges': ip_ranges,
+ 'ipspace': data['ipspace'],
+ 'subnet': data['subnet'],
+ 'subnet-name': data['name'],
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test_subnet',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'broadcast_domain': 'Default',
+ 'gateway': '10.0.0.1',
+ 'ipspace': 'Default',
+ 'subnet': '10.0.0.0/24',
+ 'ip_ranges': ['10.0.0.10-10.0.0.20', '10.0.0.30']
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_subnet for non-existent subnet'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_subnet() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_subnet for existing subnet'''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet', data=data)
+ assert my_obj.get_subnet() is not None
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_fail_broadcast_domain_modify(self, mock_ems_log):
+        ''' test that broadcast_domain cannot be modified '''
+ data = self.set_default_args()
+ data.update({'broadcast_domain': 'Test'})
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet', data=self.set_default_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert 'cannot modify broadcast_domain parameter' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_subnet.NetAppOntapSubnet.create_subnet')
+ def test_successful_create(self, create_subnet, mock_ems_log):
+ ''' creating subnet and testing idempotency '''
+ print("Create:")
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_subnet.assert_called_with()
+
+ # to reset na_helper from remembering the previous 'changed' value
+ print("reset:")
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet', data=data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_subnet.NetAppOntapSubnet.rename_subnet')
+ def test_successful_rename(self, rename_subnet, mock_ems_log):
+ ''' renaming subnet '''
+ data = self.set_default_args()
+ data.update({'from_name': data['name'], 'name': 'new_test_subnet'})
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet', data=self.set_default_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_subnet.NetAppOntapSubnet.delete_subnet')
+ def test_successful_delete(self, delete_subnet, mock_ems_log):
+ ''' deleting subnet and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet', data=data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_subnet.assert_called_with()
+
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_successful_modify(self, mock_ems_log):
+ ''' modifying subnet and testing idempotency '''
+ data = self.set_default_args()
+ data.update({'ip_ranges': ['10.0.0.10-10.0.0.25', '10.0.0.30']})
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet', data=self.set_default_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.ems_log_event')
+ def test_if_all_methods_catch_exception(self, mock_ems_log):
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subnet_fail', data=data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_subnet()
+ assert 'Error creating subnet' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_subnet()
+ assert 'Error deleting subnet' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_subnet()
+ assert 'Error modifying subnet' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.rename_subnet()
+ assert 'Error renaming subnet' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py
new file mode 100644
index 00000000..c6cd5ed8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py
@@ -0,0 +1,309 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_nfs '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs \
+ import NetAppONTAPNFS as nfs_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None, job_error=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'nfs':
+ xml = self.build_nfs_info(self.params)
+ self.xml_out = xml
+ if self.kind == 'nfs_status':
+ xml = self.build_nfs_status_info(self.params)
+ return xml
+
+ @staticmethod
+ def build_nfs_info(nfs_details):
+        ''' build xml data for nfs-info attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ "attributes-list": {
+ "nfs-info": {
+ "auth-sys-extended-groups": "false",
+ "cached-cred-harvest-timeout": "86400000",
+ "cached-cred-negative-ttl": "7200000",
+ "cached-cred-positive-ttl": "86400000",
+ "cached-transient-err-ttl": "30000",
+ "chown-mode": "use_export_policy",
+ "enable-ejukebox": "true",
+ "extended-groups-limit": "32",
+ "file-session-io-grouping-count": "5000",
+ "file-session-io-grouping-duration": "120",
+ "ignore-nt-acl-for-root": "false",
+ "is-checksum-enabled-for-replay-cache": "true",
+ "is-mount-rootonly-enabled": "true",
+ "is-netgroup-dns-domain-search": "true",
+ "is-nfs-access-enabled": "false",
+ "is-nfs-rootonly-enabled": "false",
+ "is-nfsv2-enabled": "false",
+ "is-nfsv3-64bit-identifiers-enabled": "false",
+ "is-nfsv3-connection-drop-enabled": "true",
+ "is-nfsv3-enabled": "true",
+ "is-nfsv3-fsid-change-enabled": "true",
+ "is-nfsv4-fsid-change-enabled": "true",
+ "is-nfsv4-numeric-ids-enabled": "true",
+ "is-nfsv40-acl-enabled": "false",
+ "is-nfsv40-enabled": "true",
+ "is-nfsv40-migration-enabled": "false",
+ "is-nfsv40-read-delegation-enabled": "false",
+ "is-nfsv40-referrals-enabled": "false",
+ "is-nfsv40-req-open-confirm-enabled": "false",
+ "is-nfsv40-write-delegation-enabled": "false",
+ "is-nfsv41-acl-enabled": "false",
+ "is-nfsv41-acl-preserve-enabled": "true",
+ "is-nfsv41-enabled": "true",
+ "is-nfsv41-migration-enabled": "false",
+ "is-nfsv41-pnfs-enabled": "true",
+ "is-nfsv41-read-delegation-enabled": "false",
+ "is-nfsv41-referrals-enabled": "false",
+ "is-nfsv41-state-protection-enabled": "true",
+ "is-nfsv41-write-delegation-enabled": "false",
+ "is-qtree-export-enabled": "false",
+ "is-rquota-enabled": "false",
+ "is-tcp-enabled": "false",
+ "is-udp-enabled": "false",
+ "is-v3-ms-dos-client-enabled": "false",
+ "is-validate-qtree-export-enabled": "true",
+ "is-vstorage-enabled": "false",
+ "map-unknown-uid-to-default-windows-user": "true",
+ "mountd-port": "635",
+ "name-service-lookup-protocol": "udp",
+ "netgroup-trust-any-ns-switch-no-match": "false",
+ "nfsv4-acl-max-aces": "400",
+ "nfsv4-grace-seconds": "45",
+ "nfsv4-id-domain": "defaultv4iddomain.com",
+ "nfsv4-lease-seconds": "30",
+ "nfsv41-implementation-id-domain": "netapp.com",
+ "nfsv41-implementation-id-name": "NetApp Release Kalyaniblack__9.4.0",
+ "nfsv41-implementation-id-time": "1541070767",
+ "nfsv4x-session-num-slots": "180",
+ "nfsv4x-session-slot-reply-cache-size": "640",
+ "nlm-port": "4045",
+ "nsm-port": "4046",
+ "ntacl-display-permissive-perms": "false",
+ "ntfs-unix-security-ops": "use_export_policy",
+ "permitted-enc-types": {
+ "string": ["des", "des3", "aes_128", "aes_256"]
+ },
+ "rpcsec-ctx-high": "0",
+ "rpcsec-ctx-idle": "0",
+ "rquotad-port": "4049",
+ "showmount": "true",
+ "showmount-timestamp": "1548372452",
+ "skip-root-owner-write-perm-check": "false",
+ "tcp-max-xfer-size": "1048576",
+ "udp-max-xfer-size": "32768",
+ "v3-search-unconverted-filename": "false",
+ "v4-inherited-acl-preserve": "false",
+ "vserver": "ansible"
+ }
+ },
+ "num-records": "1"
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_nfs_status_info(nfs_status_details):
+        ''' build xml data for nfs status info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'is-enabled': "true"
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_nfs_group = {
+ 'vserver': 'nfs_vserver',
+ }
+
+ def mock_args(self):
+ return {
+ 'vserver': self.mock_nfs_group['vserver'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+
+ def get_nfs_mock_object(self, kind=None):
+ """
+        Helper method to return an na_ontap_nfs object
+        :param kind: passes this param to MockONTAPConnection()
+        :return: na_ontap_nfs object
+ """
+ nfsy_obj = nfs_module()
+ nfsy_obj.asup_log_for_cserver = Mock(return_value=None)
+ nfsy_obj.cluster = Mock()
+ nfsy_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ nfsy_obj.server = MockONTAPConnection()
+ else:
+ nfsy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_nfs_group)
+ return nfsy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ nfs_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_nfs(self):
+ ''' Test if get_nfs_service returns None for non-existent nfs '''
+ set_module_args(self.mock_args())
+ result = self.get_nfs_mock_object().get_nfs_service()
+ assert result is None
+
+ def test_get_existing_nfs(self):
+        ''' Test if get_nfs_service returns details for existing nfs '''
+ set_module_args(self.mock_args())
+ result = self.get_nfs_mock_object('nfs').get_nfs_service()
+ assert result['is_nfsv3_enabled']
+
+ def test_get_nonexistent_nfs_status(self):
+        ''' Test if get_nfs_status returns None for non-existent nfs '''
+ set_module_args(self.mock_args())
+ result = self.get_nfs_mock_object().get_nfs_status()
+ assert result is None
+
+ def test_get_existing_nfs_status(self):
+        ''' Test if get_nfs_status returns details for existing nfs '''
+ set_module_args(self.mock_args())
+ result = self.get_nfs_mock_object('nfs_status').get_nfs_status()
+ assert result
+
+ def test_modify_nfs(self):
+ ''' Test if modify_nfs runs for existing nfs '''
+ data = self.mock_args()
+ data['nfsv3'] = 'enabled'
+ data['nfsv3_fsid_change'] = 'enabled'
+ data['nfsv4'] = 'enabled'
+ data['nfsv41'] = 'enabled'
+ data['vstorage_state'] = 'enabled'
+ data['tcp'] = 'enabled'
+ data['udp'] = 'enabled'
+ data['nfsv4_id_domain'] = 'nfsv4_id_domain'
+ data['nfsv40_acl'] = 'enabled'
+ data['nfsv40_read_delegation'] = 'enabled'
+ data['nfsv40_write_delegation'] = 'enabled'
+ data['nfsv41_acl'] = 'enabled'
+ data['nfsv41_read_delegation'] = 'enabled'
+ data['nfsv41_write_delegation'] = 'enabled'
+ data['showmount'] = 'enabled'
+ data['tcp_max_xfer_size'] = '1048576'
+ set_module_args(data)
+ self.get_nfs_mock_object('nfs_status').modify_nfs()
+
+ def test_successfully_modify_nfs(self):
+ ''' Test modify nfs successful for modifying tcp max xfer size. '''
+ data = self.mock_args()
+ data['tcp_max_xfer_size'] = '8192'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_nfs_mock_object('nfs').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_nfs_idempotency(self):
+ ''' Test modify nfs idempotency '''
+ data = self.mock_args()
+ data['tcp_max_xfer_size'] = '1048576'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_nfs_mock_object('nfs').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs.NetAppONTAPNFS.delete_nfs')
+ def test_successfully_delete_nfs(self, delete_nfs):
+ ''' Test successfully delete nfs '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ obj = self.get_nfs_mock_object('nfs')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_nfs.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs.NetAppONTAPNFS.get_nfs_service')
+ def test_successfully_enable_nfs(self, get_nfs_service):
+ ''' Test successfully enable nfs on non-existent nfs '''
+ data = self.mock_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ get_nfs_service.side_effect = [
+ None,
+ {}
+ ]
+ obj = self.get_nfs_mock_object('nfs')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py
new file mode 100644
index 00000000..405f1e43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py
@@ -0,0 +1,268 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_ntfs_dacl'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl \
+ import NetAppOntapNtfsDacl as dacl_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required"
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ request = xml.to_string().decode('utf-8')
+ if self.kind == 'error':
+ raise netapp_utils.zapi.NaApiError('test', 'expect error')
+ elif request.startswith("<ems-autosupport-log>"):
+            xml = None  # or something that makes the logger happy, so that @patch is no longer needed
+ # or
+ # xml = build_ems_log_response()
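+            # a minimal sketch of such a helper (hypothetical, not part of this module) could
+            # simply return an empty ZAPI element, e.g.:
+            #   def build_ems_log_response():
+            #       return netapp_utils.zapi.NaElement('results')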
+ elif request.startswith("<file-directory-security-ntfs-dacl-get-iter>"):
+ if self.kind == 'create':
+ xml = self.build_dacl_info()
+ else:
+ xml = self.build_dacl_info(self.params)
+ elif request.startswith("<file-directory-security-ntfs-dacl-modify>"):
+ xml = self.build_dacl_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_dacl_info(data=None):
+ xml = netapp_utils.zapi.NaElement('xml')
+ vserver = 'vserver'
+ attributes = {'num-records': '0',
+ 'attributes-list': {'file-directory-security-ntfs-dacl': {'vserver': vserver}}}
+
+ if data is not None:
+ attributes['num-records'] = '1'
+ if data.get('access_type'):
+ attributes['attributes-list']['file-directory-security-ntfs-dacl']['access-type'] = data['access_type']
+ if data.get('account'):
+ attributes['attributes-list']['file-directory-security-ntfs-dacl']['account'] = data['account']
+ if data.get('rights'):
+ attributes['attributes-list']['file-directory-security-ntfs-dacl']['rights'] = data['rights']
+ if data.get('advanced_rights'):
+ attributes['attributes-list']['file-directory-security-ntfs-dacl']['advanced-rights'] = data['advanced_rights']
+ if data.get('apply_to'):
+ tmp = []
+ for target in data['apply_to']:
+ tmp.append({'inheritance-level': target})
+ attributes['attributes-list']['file-directory-security-ntfs-dacl']['apply-to'] = tmp
+ if data.get('security_descriptor'):
+ attributes['attributes-list']['file-directory-security-ntfs-dacl']['ntfs-sd'] = data['security_descriptor']
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_ntfs_dacl '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_dacl_mock_object(self, type='zapi', kind=None, status=None):
+ dacl_obj = dacl_module()
+ dacl_obj.autosupport_log = Mock(return_value=None)
+ if type == 'zapi':
+ if kind is None:
+ dacl_obj.server = MockONTAPConnection()
+ else:
+ dacl_obj.server = MockONTAPConnection(kind=kind, data=status)
+ return dacl_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ dacl_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_dacl_error(self):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ data['apply_to'] = 'this_folder,files'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_dacl_mock_object('zapi', 'error', data).apply()
+ msg = 'Error fetching allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. Reason - test:expect error'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_successfully_create_dacl(self):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ data['apply_to'] = 'this_folder,files'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dacl_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_dacl_idempotency(self):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ data['apply_to'] = ['this_folder', 'files']
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dacl_mock_object('zapi', 'create_idempotency', data).apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_modify_dacl(self):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ data['apply_to'] = ['this_folder', 'files']
+ set_module_args(data)
+ data['advanced_rights'] = 'read_data,write_data'
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dacl_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_dacl_idempotency(self):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ data['apply_to'] = ['this_folder', 'files']
+ set_module_args(data)
+ data['rights'] = 'full_control'
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_dacl_mock_object('zapi', 'modify_idempotency', data).apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl.NetAppOntapNtfsDacl.get_dacl')
+ def test_modify_error(self, get_info):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ set_module_args(data)
+ get_info.side_effect = [
+ {
+ 'access_type': 'allow',
+ 'account': 'acc_test',
+ 'security_descriptor': 'sd_test',
+ 'rights': 'modify',
+ 'apply_to': ['this_folder', 'files']
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_dacl_mock_object('zapi', 'error', data).apply()
+ msg = 'Error modifying allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. Reason - test:expect error'
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl.NetAppOntapNtfsDacl.get_dacl')
+ def test_create_error(self, get_info):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ set_module_args(data)
+ get_info.side_effect = [
+ None
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_dacl_mock_object('zapi', 'error', data).apply()
+ msg = 'Error adding allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. Reason - test:expect error'
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl.NetAppOntapNtfsDacl.get_dacl')
+ def test_delete_error(self, get_info):
+ data = self.mock_args()
+ data['access_type'] = 'allow'
+ data['account'] = 'acc_test'
+ data['rights'] = 'full_control'
+ data['security_descriptor'] = 'sd_test'
+ data['state'] = 'absent'
+ set_module_args(data)
+ get_info.side_effect = [
+ {
+ 'access_type': 'allow',
+ 'account': 'acc_test',
+ 'security_descriptor': 'sd_test',
+ 'rights': 'modify'
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_dacl_mock_object('zapi', 'error', data).apply()
+ msg = 'Error deleting allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. Reason - test:expect error'
+ assert exc.value.args[0]['msg'] == msg
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py
new file mode 100644
index 00000000..f82e3536
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py
@@ -0,0 +1,225 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_ntfs_sd'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd \
+ import NetAppOntapNtfsSd as sd_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ request = xml.to_string().decode('utf-8')
+ if self.kind == 'error':
+ raise netapp_utils.zapi.NaApiError('test', 'expect error')
+ elif request.startswith("<ems-autosupport-log>"):
+            xml = None  # or something that makes the logger happy, so that @patch is no longer needed
+ # or
+ # xml = build_ems_log_response()
+ elif request.startswith("<file-directory-security-ntfs-get-iter>"):
+ if self.kind == 'create':
+ xml = self.build_sd_info()
+ else:
+ xml = self.build_sd_info(self.params)
+ elif request.startswith("<file-directory-security-ntfs-modify>"):
+ xml = self.build_sd_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_sd_info(data=None):
+ xml = netapp_utils.zapi.NaElement('xml')
+ vserver = 'vserver'
+ attributes = {'num-records': 1,
+ 'attributes-list': {'file-directory-security-ntfs': {'vserver': vserver}}}
+ if data is not None:
+ if data.get('name'):
+ attributes['attributes-list']['file-directory-security-ntfs']['ntfs-sd'] = data['name']
+ if data.get('owner'):
+ attributes['attributes-list']['file-directory-security-ntfs']['owner'] = data['owner']
+ if data.get('group'):
+ attributes['attributes-list']['file-directory-security-ntfs']['group'] = data['group']
+ if data.get('control_flags_raw'):
+ attributes['attributes-list']['file-directory-security-ntfs']['control-flags-raw'] = str(data['control_flags_raw'])
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_ntfs_sd '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'vserver': 'vserver',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_sd_mock_object(self, type='zapi', kind=None, status=None):
+ sd_obj = sd_module()
+ netapp_utils.ems_log_event = Mock(return_value=None)
+ if type == 'zapi':
+ if kind is None:
+ sd_obj.server = MockONTAPConnection()
+ else:
+ sd_obj.server = MockONTAPConnection(kind=kind, data=status)
+ return sd_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ sd_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_successfully_create_sd(self):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sd_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_sd_idempotency(self):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sd_mock_object('zapi', 'create_idempotency', data).apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_modify_sd(self):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ data['control_flags_raw'] = 1
+ set_module_args(data)
+ data['control_flags_raw'] = 2
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sd_mock_object('zapi', 'create', data).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_sd_idempotency(self):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ data['control_flags_raw'] = 2
+ set_module_args(data)
+ data['control_flags_raw'] = 2
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sd_mock_object('zapi', 'modify_idempotency', data).apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd.NetAppOntapNtfsSd.get_ntfs_sd')
+ def test_modify_error(self, get_info):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ data['control_flags_raw'] = 2
+ set_module_args(data)
+ get_info.side_effect = [
+ {
+ 'name': 'sd_test',
+ 'control_flags_raw': 1
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_sd_mock_object('zapi', 'error', data).apply()
+ print(exc)
+ assert exc.value.args[0]['msg'] == 'Error modifying NTFS security descriptor sd_test: NetApp API failed. Reason - test:expect error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd.NetAppOntapNtfsSd.get_ntfs_sd')
+ def test_create_error(self, get_info):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ set_module_args(data)
+ get_info.side_effect = [
+ None
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_sd_mock_object('zapi', 'error', data).apply()
+ print(exc)
+ assert exc.value.args[0]['msg'] == 'Error creating NTFS security descriptor sd_test: NetApp API failed. Reason - test:expect error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd.NetAppOntapNtfsSd.get_ntfs_sd')
+ def test_delete_error(self, get_info):
+ data = self.mock_args()
+ data['name'] = 'sd_test'
+ data['owner'] = 'user_test'
+ data['state'] = 'absent'
+ set_module_args(data)
+ get_info.side_effect = [
+ {
+ 'name': 'sd_test',
+ 'owner': 'user_test'
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_sd_mock_object('zapi', 'error', data).apply()
+ print(exc)
+ assert exc.value.args[0]['msg'] == 'Error deleting NTFS security descriptor sd_test: NetApp API failed. Reason - test:expect error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py
new file mode 100644
index 00000000..647a82ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py
@@ -0,0 +1,217 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_nvme '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme \
+ import NetAppONTAPNVMe as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'nvme':
+ xml = self.build_nvme_info()
+ elif self.type == 'nvme_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_nvme_info():
+ ''' build xml data for nvme-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': [{'nvme-target-service-info': {'is-available': 'true'}}]}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.75.3'
+ username = 'admin'
+ password = 'netapp1!'
+ vserver = 'ansible'
+ status_admin = True
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ vserver = 'vserver'
+ status_admin = True
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'vserver': vserver,
+ 'status_admin': status_admin
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_nvme() for non-existent nvme'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_nvme() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_nvme() for existing nvme'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='nvme')
+ assert my_obj.get_nvme()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme.NetAppONTAPNVMe.create_nvme')
+ def test_successful_create(self, create_nvme):
+ ''' creating nvme and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_nvme.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('nvme')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme.NetAppONTAPNVMe.delete_nvme')
+ def test_successful_delete(self, delete_nvme):
+ ''' deleting nvme and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('nvme')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_nvme.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme.NetAppONTAPNVMe.modify_nvme')
+ def test_successful_modify(self, modify_nvme):
+ ''' modifying nvme and testing idempotency '''
+ data = self.set_default_args()
+ data['status_admin'] = False
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('nvme')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ modify_nvme.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('nvme')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('nvme_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_nvme()
+ assert 'Error fetching nvme info:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_nvme()
+ assert 'Error creating nvme' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_nvme()
+ assert 'Error deleting nvme' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_nvme()
+ assert 'Error modifying nvme' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py
new file mode 100644
index 00000000..ecfaadc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py
@@ -0,0 +1,201 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_nvme_namespace '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace \
+ import NetAppONTAPNVMENamespace as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'namespace':
+ xml = self.build_namespace_info()
+        elif self.type == 'namespace_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_namespace_info():
+ ''' build xml data for namespace-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 2,
+ 'attributes-list': [{'nvme-namespace-info': {'path': 'abcd/vol'}},
+ {'nvme-namespace-info': {'path': 'xyz/vol'}}]}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
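+ # whether to use a mock or a simulator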
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.75.3'
+ username = 'admin'
+ password = 'netapp1!'
+ vserver = 'ansible'
+ ostype = 'linux'
+ path = 'abcd/vol'
+ size = 20
+ size_unit = 'mb'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ vserver = 'vserver'
+ ostype = 'linux'
+ path = 'abcd/vol'
+ size = 20
+ size_unit = 'mb'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'ostype': ostype,
+ 'vserver': vserver,
+ 'path': path,
+ 'size': size,
+ 'size_unit': size_unit
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_namespace() for non-existent namespace'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_namespace() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_namespace() for existing namespace'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='namespace')
+ assert my_obj.get_namespace()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.create_namespace')
+ def test_successful_create(self, create_namespace):
+ ''' creating namespace and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_namespace.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('namespace')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.delete_namespace')
+ def test_successful_delete(self, delete_namespace):
+ ''' deleting namespace and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('namespace')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_namespace.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('quota_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_namespace()
+ assert 'Error fetching namespace info:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_namespace()
+ assert 'Error creating namespace for path' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_namespace()
+ assert 'Error deleting namespace for path' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py
new file mode 100644
index 00000000..c056fc90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py
@@ -0,0 +1,242 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_nvme_subsystem '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem \
+ import NetAppONTAPNVMESubsystem as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'subsystem':
+ xml = self.build_subsystem_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_subsystem_info(vserver):
+ ''' build xml data for nvme-target-subsystem-map-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 2,
+ 'attributes-list': [{'nvme-target-subsystem-map-info': {'path': 'abcd/vol'}},
+ {'nvme-target-subsystem-map-info': {'path': 'xyz/vol'}}]}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.75.3'
+ username = 'admin'
+ password = 'netapp1!'
+ subsystem = 'test'
+ vserver = 'ansible'
+ ostype = 'linux'
+ paths = ['abcd/vol', 'xyz/vol']
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ subsystem = 'test'
+ vserver = 'vserver'
+ ostype = 'linux'
+ paths = ['abcd/vol', 'xyz/vol']
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'subsystem': subsystem,
+ 'ostype': ostype,
+ 'vserver': vserver,
+ 'paths': paths
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_subsystem() for non-existent subsystem'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_subsystem() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_subsystem() for existing subsystem'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subsystem')
+ assert my_obj.get_subsystem()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem.NetAppONTAPNVMESubsystem.create_subsystem')
+ def test_successful_create(self, create_subsystem):
+ ''' creating subsystem and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_subsystem.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('subsystem')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem.NetAppONTAPNVMESubsystem.delete_subsystem')
+ def test_successful_delete(self, delete_subsystem):
+ ''' deleting subsystem and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('subsystem')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_subsystem.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_ensure_get_subsystem_host_map_called(self):
+ ''' test get_subsystem_host_map() for non-existent subsystem'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_subsystem_host_map('paths') is None
+
+ def test_ensure_get_subsystem_host_map_called_existing(self):
+ ''' test get_subsystem_host_map() for existing subsystem'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='subsystem')
+ assert my_obj.get_subsystem_host_map('paths')
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem.NetAppONTAPNVMESubsystem.add_subsystem_host_map')
+ def test_successful_add_mock(self, add_subsystem_host_map):
+ ''' adding subsystem host/map and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ add_subsystem_host_map.assert_called_with(['abcd/vol', 'xyz/vol'], 'paths')
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem.NetAppONTAPNVMESubsystem.remove_subsystem_host_map')
+ def test_successful_remove_mock(self, remove_subsystem_host_map):
+ ''' removing subsystem host/map and testing idempotency '''
+ data = self.set_default_args()
+ data['paths'] = ['abcd/vol']
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('subsystem')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ remove_subsystem_host_map.assert_called_with(['xyz/vol'], 'paths')
+
+ def test_successful_add(self):
+ ''' adding subsystem host/map and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_remove(self):
+ ''' removing subsystem host/map and testing idempotency '''
+ data = self.set_default_args()
+ data['paths'] = ['abcd/vol']
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('subsystem')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py
new file mode 100644
index 00000000..3317aed3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py
@@ -0,0 +1,300 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for Ansible module: na_ontap_object_store """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_object_store \
+ import NetAppOntapObjectStoreConfig as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+# REST API canned responses when mocking send_request
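+# each entry is a (status_code, response body, error message) tuple, matching what the
+# patched OntapRestAPI.send_request yields; 'is_rest'/'is_zapi' select the REST or ZAPI code path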
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_uuid': (200, {'records': [{'uuid': 'ansible'}]}, None),
+ 'get_object_store': (200,
+ {'uuid': 'ansible',
+ 'name': 'ansible',
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'object_store':
+ xml = self.build_object_store_info()
+ elif self.type == 'object_store_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_object_store_info():
+ ''' build xml data for object store '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'attributes':
+ {'aggr-object-store-config-info':
+ {'object-store-name': 'ansible'}
+ }
+ }
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ # whether to use a mock or a simulator
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ name = 'ansible'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ name = 'ansible'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'name': name
+ })
+
+ def call_command(self, module_args, cx_type='zapi'):
+ ''' utility function to call apply '''
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if cx_type == 'zapi':
+ if not self.onbox:
+ # mock the connection
+ my_obj.server = MockONTAPConnection('object_store')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ return exc.value.args[0]['changed']
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_ensure_object_store_get_called(self, mock_request):
+ ''' fetching details of object store '''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_aggr_object_store() is not None
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_ensure_get_called_existing(self, mock_request):
+ ''' test for existing object store'''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='object_store')
+ assert my_obj.get_aggr_object_store()
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_object_store_create(self, mock_request):
+ ''' test create idempotency when the object store already exists '''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ module_args = {
+ 'provider_type': 'abc',
+ 'server': 'abc',
+ 'container': 'abc',
+ 'access_key': 'abc',
+ 'secret_password': 'abc'
+ }
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ # mock the connection
+ my_obj.server = MockONTAPConnection(kind='object_store')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_object_store_delete(self, mock_request):
+ ''' test for deleting object store'''
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ module_args = {
+ 'state': 'absent',
+ }
+ changed = self.call_command(module_args)
+ assert changed
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(self.set_default_args())
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ data = {
+ 'provider_type': 'abc',
+ 'server': 'abc',
+ 'container': 'abc',
+ 'access_key': 'abc',
+ 'secret_password': 'abc'
+ }
+ data.update(self.set_default_args())
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'],
+ SRR['get_object_store'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete(self, mock_request):
+ data = {
+ 'state': 'absent',
+ }
+ data.update(self.set_default_args())
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'],
+ SRR['get_object_store'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_if_all_methods_catch_exception(self, mock_request):
+ mock_request.side_effect = [
+ SRR['is_zapi'],
+ SRR['end_of_sequence']
+ ]
+ module_args = {
+ 'provider_type': 'abc',
+ 'server': 'abc',
+ 'container': 'abc',
+ 'access_key': 'abc',
+ 'secret_password': 'abc'
+ }
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('object_store_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_aggr_object_store()
+ assert 'Error' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_aggr_object_store()
+ assert 'Error provisioning object store config ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_aggr_object_store()
+ assert 'Error removing object store config ' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py
new file mode 100644
index 00000000..04942486
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py
@@ -0,0 +1,173 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_ports '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports \
+ import NetAppOntapPorts as port_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self, choice):
+ if choice == 'broadcast_domain':
+ return {
+ 'names': ['test_port_1', 'test_port_2'],
+ 'resource_name': 'test_domain',
+ 'resource_type': 'broadcast_domain',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ elif choice == 'portset':
+ return {
+ 'names': ['test_lif'],
+ 'resource_name': 'test_portset',
+ 'resource_type': 'portset',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'vserver': 'test_vserver'
+ }
+
+ def get_port_mock_object(self):
+ """
+ Helper method to return an na_ontap_ports object
+ """
+ port_obj = port_module()
+ port_obj.asup_log_for_cserver = Mock(return_value=None)
+ port_obj.server = Mock()
+ port_obj.server.invoke_successfully = Mock()
+
+ return port_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_broadcast_domain_ports')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.get_broadcast_domain_ports')
+ def test_successfully_add_broadcast_domain_ports(self, get_broadcast_domain_ports, add_broadcast_domain_ports):
+ ''' Test successful add broadcast domain ports '''
+ data = self.mock_args('broadcast_domain')
+ set_module_args(data)
+ get_broadcast_domain_ports.side_effect = [
+ []
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_broadcast_domain_ports')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.get_broadcast_domain_ports')
+ def test_add_broadcast_domain_ports_idempotency(self, get_broadcast_domain_ports, add_broadcast_domain_ports):
+ ''' Test add broadcast domain ports idempotency '''
+ data = self.mock_args('broadcast_domain')
+ set_module_args(data)
+ get_broadcast_domain_ports.side_effect = [
+ ['test_port_1', 'test_port_2']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_portset_ports')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.portset_get')
+ def test_successfully_add_portset_ports(self, portset_get, add_portset_ports):
+ ''' Test successful add portset ports '''
+ data = self.mock_args('portset')
+ set_module_args(data)
+ portset_get.side_effect = [
+ []
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_portset_ports')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.portset_get')
+ def test_add_portset_ports_idempotency(self, portset_get, add_portset_ports):
+ ''' Test add portset ports idempotency '''
+ data = self.mock_args('portset')
+ set_module_args(data)
+ portset_get.side_effect = [
+ ['test_lif']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_broadcast_domain_ports')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.get_broadcast_domain_ports')
+ def test_successfully_remove_broadcast_domain_ports(self, get_broadcast_domain_ports, add_broadcast_domain_ports):
+ ''' Test successful remove broadcast domain ports '''
+ data = self.mock_args('broadcast_domain')
+ data['state'] = 'absent'
+ set_module_args(data)
+ get_broadcast_domain_ports.side_effect = [
+ ['test_port_1', 'test_port_2']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_portset_ports')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.portset_get')
+ def test_successfully_remove_portset_ports(self, portset_get, add_portset_ports):
+ ''' Test successful remove portset ports '''
+ data = self.mock_args('portset')
+ data['state'] = 'absent'
+ set_module_args(data)
+ portset_get.side_effect = [
+ ['test_lif']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_port_mock_object().apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py
new file mode 100644
index 00000000..2efb2275
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py
@@ -0,0 +1,190 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_portset'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_portset \
+ import NetAppONTAPPortset as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, parm2=None, parm3=None):
+ ''' save arguments '''
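+ # parm1, parm2 and parm3 carry the portset name, vserver and portset type used to build canned responses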
+ self.type = kind
+ self.parm1 = parm1
+ self.parm2 = parm2
+ self.parm3 = parm3
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'portset':
+ xml = self.build_portset_info(self.parm1, self.parm2, self.parm3)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_portset_info(portset, vserver, type):
+ ''' build xml data for portset-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'portset-info': {'portset-name': portset,
+ 'vserver': vserver, 'portset-type': type,
+ 'portset-port-total': '0'}}}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.use_vsim = False
+
+ def set_default_args(self):
+ if self.use_vsim:
+ hostname = '10.193.77.154'
+ username = 'admin'
+ password = 'netapp1!'
+ name = 'test'
+ type = 'mixed'
+ vserver = 'ansible_test'
+ ports = ['a1', 'a2']
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ name = 'name'
+ type = 'mixed'
+ vserver = 'vserver'
+ ports = ['a1', 'a2']
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'name': name,
+ 'type': type,
+ 'vserver': vserver,
+ 'ports': ports
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_portset_get_called(self):
+ ''' test portset_get() returns None for a non-existent portset '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ portset = my_obj.portset_get()
+ print('Info: test_portset_get: %s' % repr(portset))
+ assert portset is None
+
+ def test_ensure_portset_apply_called(self):
+ ''' Test successful create '''
+ module_args = {'name': 'create'}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = self.server
+ portset = my_obj.portset_get()
+ print('Info: test_portset_get: %s' % repr(portset))
+ assert portset is None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_portset_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('portset', 'create', 'vserver', 'mixed')
+ portset = my_obj.portset_get()
+ print('Info: test_portset_get: %s' % repr(portset))
+ assert portset is not None
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_portset_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_modify_ports(self):
+ ''' Test modify_portset method '''
+ module_args = {'ports': ['l1', 'l2']}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('portset', parm3='mixed')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_portset_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_delete_portset(self):
+ ''' Test successful delete '''
+ module_args = {'state': 'absent'}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('portset')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_portset_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py
new file mode 100644
index 00000000..b376ba8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py
@@ -0,0 +1,347 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_qos_adaptive_policy_group '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group \
+ import NetAppOntapAdaptiveQosPolicyGroup as qos_policy_group_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'policy':
+ xml = self.build_policy_group_info(self.params)
+ if self.kind == 'error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_policy_group_info(vol_details):
+ ''' build xml data for qos-adaptive-policy-group-info attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'qos-adaptive-policy-group-info': {
+ 'absolute-min-iops': '50IOPS',
+ 'expected-iops': '150IOPS/TB',
+ 'peak-iops': '220IOPS/TB',
+ 'peak-iops-allocation': 'used_space',
+ 'num-workloads': 0,
+ 'pgid': 6941,
+ 'policy-group': vol_details['name'],
+ 'vserver': vol_details['vserver']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_policy_group = {
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver',
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_policy_group['name'],
+ 'vserver': self.mock_policy_group['vserver'],
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+
+ def get_policy_group_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_qos_adaptive_policy_group object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_qos_adaptive_policy_group object
+ """
+ policy_obj = qos_policy_group_module()
+ policy_obj.autosupport_log = Mock(return_value=None)
+ policy_obj.cluster = Mock()
+ policy_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ policy_obj.server = MockONTAPConnection()
+ else:
+ policy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_policy_group)
+ return policy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ qos_policy_group_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_policy(self):
+ ''' Test if get_policy_group returns None for non-existent policy_group '''
+ set_module_args(self.mock_args())
+ result = self.get_policy_group_mock_object().get_policy_group()
+ assert result is None
+
+ def test_get_existing_policy_group(self):
+ ''' Test if get_policy_group returns details for existing policy_group '''
+ set_module_args(self.mock_args())
+ result = self.get_policy_group_mock_object('policy').get_policy_group()
+ assert result['name'] == self.mock_policy_group['name']
+ assert result['vserver'] == self.mock_policy_group['vserver']
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if name is not specified'''
+ data = self.mock_args()
+ del data['name']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('policy').create_policy_group()
+ msg = 'missing required arguments: name'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ obj = self.get_policy_group_mock_object('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group')
+ def test_create_error(self, get_policy_group):
+ ''' Test create error '''
+ set_module_args(self.mock_args())
+ get_policy_group.side_effect = [
+ None
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error creating adaptive qos policy group policy_1: NetApp API failed. Reason - test:error'
+
+ def test_successful_delete(self):
+ ''' Test delete existing policy group '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group')
+ def test_delete_error(self, get_policy_group):
+ ''' Test delete error '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ current = {
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error deleting adaptive qos policy group policy_1: NetApp API failed. Reason - test:error'
+
+ def test_successful_modify_expected_iops(self):
+ ''' Test successful modify expected iops '''
+ data = self.mock_args()
+ data['expected_iops'] = '175IOPS'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_expected_iops_idempotency(self):
+ ''' Test modify idempotency '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group')
+ def test_modify_error(self, get_policy_group):
+ ''' Test modify error '''
+ data = self.mock_args()
+ data['expected_iops'] = '175IOPS'
+ set_module_args(data)
+ current = {
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying adaptive qos policy group policy_1: NetApp API failed. Reason - test:error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group')
+ def test_rename(self, get_policy_group):
+ ''' Test successful rename '''
+ data = self.mock_args()
+ data['name'] = 'policy_2'
+ data['from_name'] = 'policy_1'
+ set_module_args(data)
+ current = {
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group')
+ def test_rename_idempotency(self, get_policy_group):
+ ''' Test rename idempotency '''
+ data = self.mock_args()
+ data['name'] = 'policy_1'
+ data['from_name'] = 'policy_1'
+ current = {
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ current,
+ current
+ ]
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group')
+ def test_rename_error(self, get_policy_group):
+ ''' Test rename error '''
+ data = self.mock_args()
+ data['from_name'] = 'policy_1'
+ data['name'] = 'policy_2'
+ set_module_args(data)
+ current = {
+ 'absolute_min_iops': '50IOPS',
+ 'expected_iops': '150IOPS/TB',
+ 'peak_iops': '220IOPS/TB',
+ 'peak_iops_allocation': 'used_space',
+ 'is_shared': 'true',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error renaming adaptive qos policy group policy_1: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py
new file mode 100644
index 00000000..295f9070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py
@@ -0,0 +1,340 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_qos_policy_group '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group \
+ import NetAppOntapQosPolicyGroup as qos_policy_group_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'policy':
+ xml = self.build_policy_group_info(self.params)
+ if self.kind == 'error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_policy_group_info(vol_details):
+ ''' build xml data for qos-policy-group-info attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'qos-policy-group-info': {
+ 'is-shared': 'true',
+ 'max-throughput': '800KB/s,800IOPS',
+ 'min-throughput': '100IOPS',
+ 'num-workloads': 0,
+ 'pgid': 8690,
+ 'policy-group': vol_details['name'],
+ 'vserver': vol_details['vserver']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_policy_group = {
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver',
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_policy_group['name'],
+ 'vserver': self.mock_policy_group['vserver'],
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+
+ def get_policy_group_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_qos_policy_group object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_qos_policy_group object
+ """
+ policy_obj = qos_policy_group_module()
+ policy_obj.asup_log_for_cserver = Mock(return_value=None)
+ policy_obj.cluster = Mock()
+ policy_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ policy_obj.server = MockONTAPConnection()
+ else:
+ policy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_policy_group)
+ return policy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ qos_policy_group_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_policy(self):
+ ''' Test if get_policy_group returns None for non-existent policy_group '''
+ set_module_args(self.mock_args())
+ result = self.get_policy_group_mock_object().get_policy_group()
+ assert result is None
+
+ def test_get_existing_policy_group(self):
+ ''' Test if get_policy_group returns details for existing policy_group '''
+ set_module_args(self.mock_args())
+ result = self.get_policy_group_mock_object('policy').get_policy_group()
+ assert result['name'] == self.mock_policy_group['name']
+ assert result['vserver'] == self.mock_policy_group['vserver']
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if name is not specified'''
+ data = self.mock_args()
+ del data['name']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('policy').create_policy_group()
+ msg = 'missing required arguments: name'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ obj = self.get_policy_group_mock_object('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group.NetAppOntapQosPolicyGroup.get_policy_group')
+ def test_create_error(self, get_policy_group):
+ ''' Test create error '''
+ set_module_args(self.mock_args())
+ get_policy_group.side_effect = [
+ None
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error creating qos policy group policy_1: NetApp API failed. Reason - test:error'
+
+ def test_successful_delete(self):
+ ''' Test delete existing policy group '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group.NetAppOntapQosPolicyGroup.get_policy_group')
+ def test_delete_error(self, get_policy_group):
+ ''' Test delete error '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ current = {
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error deleting qos policy group policy_1: NetApp API failed. Reason - test:error'
+
+ def test_successful_modify_max_throughput(self):
+ ''' Test successful modify max throughput '''
+ data = self.mock_args()
+ data['max_throughput'] = '900KB/s,800iops'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_max_throughput_idempotency(self):
+ ''' Test modify idempotency '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group.NetAppOntapQosPolicyGroup.get_policy_group')
+ def test_modify_error(self, get_policy_group):
+ ''' Test modify error '''
+ data = self.mock_args()
+ data['max_throughput'] = '900KB/s,900IOPS'
+ set_module_args(data)
+ current = {
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying qos policy group policy_1: NetApp API failed. Reason - test:error'
+
+ def test_modify_is_shared_error(self):
+ ''' Test error when attempting to modify is_shared '''
+ data = self.mock_args()
+ data['max_throughput'] = '900KB/s,900IOPS'
+ data['is_shared'] = False
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['msg'] == 'Error cannot modify is_shared attribute.'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group.NetAppOntapQosPolicyGroup.get_policy_group')
+ def test_rename(self, get_policy_group):
+ ''' Test rename '''
+ data = self.mock_args()
+ data['name'] = 'policy_2'
+ data['from_name'] = 'policy_1'
+ set_module_args(data)
+ current = {
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group.NetAppOntapQosPolicyGroup.get_policy_group')
+ def test_rename_idempotency(self, get_policy_group):
+ ''' Test rename idempotency '''
+ data = self.mock_args()
+ data['name'] = 'policy_1'
+ data['from_name'] = 'policy_1'
+ current = {
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ current,
+ current
+ ]
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_group_mock_object('policy').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group.NetAppOntapQosPolicyGroup.get_policy_group')
+ def test_rename_error(self, get_policy_group):
+ ''' Test rename error '''
+ data = self.mock_args()
+ data['from_name'] = 'policy_1'
+ data['name'] = 'policy_2'
+ set_module_args(data)
+ current = {
+ 'is_shared': 'true',
+ 'max_throughput': '800KB/s,800IOPS',
+ 'min_throughput': '100IOPS',
+ 'name': 'policy_1',
+ 'vserver': 'policy_vserver'
+ }
+ get_policy_group.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_policy_group_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error renaming qos policy group policy_1: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py
new file mode 100644
index 00000000..700dd251
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py
@@ -0,0 +1,464 @@
+''' unit tests ONTAP Ansible module: na_ontap_qtree '''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree \
+ import NetAppOntapQTree as qtree_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
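+# each entry is a (status_code, body, error) tuple, matching what the mocked send_request returns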
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'qtree_record': (200,
+ {"records": [{"svm": {"uuid": "09e9fd5e-8ebd-11e9-b162-005056b39fe7",
+ "name": "ansibleSVM"},
+ "id": 1,
+ "name": "string",
+ "security_style": "unix",
+ "unix_permissions": "abc",
+ "export_policy": {"name": "ansible"},
+ "volume": {"name": "volume1",
+ "uuid": "028baa66-41bd-11e9-81d5-00a0986138f7"}}]}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'qtree':
+ xml = self.build_quota_info()
+ elif self.type == 'qtree_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_quota_info():
+ ''' build xml data for qtree-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'qtree-info': {'export-policy': 'ansible', 'vserver': 'ansible', 'qtree': 'ansible',
+ 'oplocks': 'enabled', 'security-style': 'unix', 'mode': 'abc',
+ 'volume': 'ansible'}}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self, use_rest=None):
+ # the same canned connection and qtree values are used whether or not a real box is targeted
+ hostname = '10.10.10.10'
+ username = 'username'
+ password = 'password'
+ name = 'ansible'
+ vserver = 'ansible'
+ flexvol_name = 'ansible'
+ export_policy = 'ansible'
+ security_style = 'unix'
+ mode = 'abc'
+
+ args = dict({
+ 'state': 'present',
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'name': name,
+ 'vserver': vserver,
+ 'flexvol_name': flexvol_name,
+ 'export_policy': export_policy,
+ 'security_style': security_style,
+ 'unix_permissions': mode
+ })
+
+ if use_rest is not None:
+ args['use_rest'] = use_rest
+
+ return args
+
+ @staticmethod
+ def get_qtree_mock_object(cx_type='zapi', kind=None):
+ qtree_obj = qtree_module()
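+ # when cx_type is 'rest' no ZAPI server is attached; the REST tests mock OntapRestAPI.send_request instead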
+ if cx_type == 'zapi':
+ if kind is None:
+ qtree_obj.server = MockONTAPConnection()
+ else:
+ qtree_obj.server = MockONTAPConnection(kind=kind)
+ return qtree_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ qtree_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_qtree for non-existent qtree'''
+ set_module_args(self.set_default_args(use_rest='Never'))
+ print('starting')
+ my_obj = qtree_module()
+ print('use_rest:', my_obj.use_rest)
+ my_obj.server = self.server
+ assert my_obj.get_qtree is not None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_qtree for existing qtree'''
+ set_module_args(self.set_default_args(use_rest='Never'))
+ my_obj = qtree_module()
+ my_obj.server = MockONTAPConnection(kind='qtree')
+ assert my_obj.get_qtree()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.create_qtree')
+ def test_successful_create(self, create_qtree):
+ ''' creating qtree and testing idempotency '''
+ set_module_args(self.set_default_args(use_rest='Never'))
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_qtree.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ set_module_args(self.set_default_args(use_rest='Never'))
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('qtree')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.delete_qtree')
+ def test_successful_delete(self, delete_qtree):
+ ''' deleting qtree and testing idempotency '''
+ data = self.set_default_args(use_rest='Never')
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('qtree')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ # delete_qtree.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.modify_qtree')
+ def test_successful_modify(self, modify_qtree):
+ ''' modifying qtree and testing idempotency '''
+ data = self.set_default_args(use_rest='Never')
+ data['export_policy'] = 'test'
+ set_module_args(data)
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('qtree')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ # modify_qtree.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data['export_policy'] = 'ansible'
+ set_module_args(data)
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('qtree')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.get_qtree')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.rename_qtree')
+ def test_failed_rename(self, rename_qtree, get_qtree):
+ ''' rename fails when the from_name qtree does not exist '''
+ get_qtree.side_effect = [None, None]
+ data = self.set_default_args(use_rest='Never')
+ data['from_name'] = 'ansible_old'
+ set_module_args(data)
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = 'Error renaming: qtree %s does not exist' % data['from_name']
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.get_qtree')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree.NetAppOntapQTree.rename_qtree')
+ def test_successful_rename(self, rename_qtree, get_qtree):
+ ''' renaming qtree and testing idempotency '''
+ data = self.set_default_args(use_rest='Never')
+ data['from_name'] = 'ansible_old'
+ qtree = dict(
+ security_style=data['security_style'],
+ unix_permissions=data['unix_permissions'],
+ export_policy=data['export_policy']
+ )
+ get_qtree.side_effect = [None, qtree]
+ set_module_args(data)
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ rename_qtree.assert_called_with(qtree)
+ # Idempotency
+ get_qtree.side_effect = [qtree, 'whatever']
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('qtree')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ data = self.set_default_args(use_rest='Never')
+ data['from_name'] = 'ansible'
+ set_module_args(data)
+ my_obj = qtree_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('qtree_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_qtree()
+ assert 'Error provisioning qtree ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_qtree(self.get_qtree_mock_object())
+ assert 'Error deleting qtree ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_qtree(self.get_qtree_mock_object())
+ assert 'Error modifying qtree ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.rename_qtree(self.get_qtree_mock_object())
+ assert 'Error renaming qtree ' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.set_default_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_create_rest(self, mock_request):
+ data = self.set_default_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_idempotent_create_rest(self, mock_request):
+ data = self.set_default_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['qtree_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_delete_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['qtree_record'], # get
+ SRR['empty_good'], # delete
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_idempotent_delete_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_modify_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ data['unix_permissions'] = 'abcde'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['qtree_record'], # get
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_idempotent_modify_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['qtree_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_rename_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ data['from_name'] = 'abcde'
+ # data['unix_permissions'] = 'abcde'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get (current)
+ SRR['qtree_record'], # get (from)
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_rename_rest_idempotent(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ data['from_name'] = 'abcde'
+ # data['unix_permissions'] = 'abcde'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['qtree_record'], # get (current exists)
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_successful_rename_and_modify_rest(self, mock_request):
+ data = self.set_default_args()
+ data['state'] = 'present'
+ data['from_name'] = 'abcde'
+ data['unix_permissions'] = 'abcde'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get (current)
+ SRR['qtree_record'], # get (from)
+ SRR['empty_good'], # patch (rename)
+ SRR['empty_good'], # patch (modify)
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_qtree_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py
new file mode 100644
index 00000000..55f59ae9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py
@@ -0,0 +1,207 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_quota_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_quota_policy \
+ import NetAppOntapQuotaPolicy as quota_policy_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'quota':
+ xml = self.build_quota_policy_info(self.params, True)
+ if self.kind == 'quota_not_assigned':
+ xml = self.build_quota_policy_info(self.params, False)
+ elif self.kind == 'zapi_error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_quota_policy_info(params, assigned):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'num-records': 1,
+ 'attributes-list': {
+ 'quota-policy-info': {
+ 'policy-name': params['name']},
+ 'vserver-info': {
+ 'quota-policy': params['name'] if assigned else 'default'}
+ }}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_quota_policy '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_quota_policy = {
+ 'state': 'present',
+ 'vserver': 'test_vserver',
+ 'name': 'test_policy'
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_quota_policy['state'],
+ 'vserver': self.mock_quota_policy['vserver'],
+ 'name': self.mock_quota_policy['name'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_quota_policy_mock_object(self, kind=None):
+ policy_obj = quota_policy_module()
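+ # stub out netapp_utils.ems_log_event so no real EMS ZAPI call is attempted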
+ netapp_utils.ems_log_event = Mock(return_value=None)
+ if kind is None:
+ policy_obj.server = MockONTAPConnection()
+ else:
+ policy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_quota_policy)
+ return policy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ quota_policy_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_successfully_create(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_quota_policy_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_quota_policy_mock_object('quota').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_cannot_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_quota_policy_mock_object('quota').apply()
+ msg = 'Error policy test_policy cannot be deleted as it is assigned to the vserver test_vserver'
+ assert msg == exc.value.args[0]['msg']
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_quota_policy_mock_object('quota_not_assigned').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_quota_policy_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_assign(self):
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_quota_policy_mock_object('quota_not_assigned').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_quota_policy.NetAppOntapQuotaPolicy.get_quota_policy')
+ def test_successful_rename(self, get_volume):
+ data = self.mock_args()
+ data['name'] = 'new_policy'
+ data['from_name'] = 'test_policy'
+ set_module_args(data)
+ current = {
+ 'name': 'test_policy'
+ }
+ get_volume.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_quota_policy_mock_object('quota').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error(self):
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_quota_policy_mock_object('zapi_error').get_quota_policy()
+ assert exc.value.args[0]['msg'] == 'Error fetching quota policy test_policy: NetApp API failed. Reason - test:error'
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_quota_policy_mock_object('zapi_error').create_quota_policy()
+ assert exc.value.args[0]['msg'] == 'Error creating quota policy test_policy: NetApp API failed. Reason - test:error'
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_quota_policy_mock_object('zapi_error').delete_quota_policy()
+ assert exc.value.args[0]['msg'] == 'Error deleting quota policy test_policy: NetApp API failed. Reason - test:error'
+ data['name'] = 'new_policy'
+ data['from_name'] = 'test_policy'
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_quota_policy_mock_object('zapi_error').rename_quota_policy()
+ assert exc.value.args[0]['msg'] == 'Error renaming quota policy test_policy: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py
new file mode 100644
index 00000000..134269d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py
@@ -0,0 +1,243 @@
+''' unit tests ONTAP Ansible module: na_ontap_quotas '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_quotas \
+ import NetAppONTAPQuotas as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'quotas':
+ xml = self.build_quota_info()
+ elif self.type == 'quota_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_quota_info():
+ ''' build xml data for quota-entry '''
+ xml = netapp_utils.zapi.NaElement('xml')
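+ # the '-' values below stand in for quota limits that are left unset on the canned entry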
+ data = {'num-records': 1,
+ 'attributes-list': {'quota-entry': {'volume': 'ansible',
+ 'file-limit': '-', 'disk-limit': '-',
+ 'soft-file-limit': '-', 'soft-disk-limit': '-', 'threshold': '-'}},
+ 'status': 'true'}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.75.3'
+ username = 'admin'
+ password = 'netapp1!'
+ volume = 'ansible'
+ vserver = 'ansible'
+ policy = 'ansible'
+ quota_target = '/vol/ansible'
+ type = 'user'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ volume = 'ansible'
+ vserver = 'ansible'
+ policy = 'ansible'
+ quota_target = '/vol/ansible'
+ type = 'user'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'volume': volume,
+ 'vserver': vserver,
+ 'policy': policy,
+ 'quota_target': quota_target,
+ 'type': type
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_quota for non-existent quota'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_quotas is not None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_quota for existing quota'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='quotas')
+ assert my_obj.get_quotas()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_quotas.NetAppONTAPQuotas.quota_entry_set')
+ def test_successful_create(self, quota_entry_set):
+ ''' creating quota and testing idempotency '''
+ data = self.set_default_args()
+ data.update({'file_limit': '3',
+ 'disk_limit': '4',
+ 'soft_file_limit': '3',
+ 'soft_disk_limit': '4',
+ })
+ # data['file_limit'] = '3'
+ # data['disk_limit'] = '4'
+ # data['threshold'] = '4'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ quota_entry_set.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('quotas')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_quotas.NetAppONTAPQuotas.quota_entry_delete')
+ def test_successful_delete(self, quota_entry_delete):
+ ''' deleting quota and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('quotas')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ quota_entry_delete.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ ''' modifying quota and testing idempotency '''
+ data = self.set_default_args()
+ data['file_limit'] = '3'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('quotas')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_quota_on_off(self):
+ ''' quota set on or off '''
+ data = self.set_default_args()
+ data['set_quota_status'] = 'false'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('quotas')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('quota_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_quota_status()
+ assert 'Error fetching quotas status info' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_quotas()
+ assert 'Error fetching quotas info' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.quota_entry_set()
+ assert 'Error adding/modifying quota entry' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.quota_entry_delete()
+ assert 'Error deleting quota entry' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.quota_entry_modify(module_args)
+ assert 'Error modifying quota entry' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.on_or_off_quota('quota-on')
+ assert 'Error setting quota-on for ansible' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py
new file mode 100644
index 00000000..d145a53e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py
@@ -0,0 +1,137 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_rest_cli'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_rest_cli \
+ import NetAppONTAPCommandREST as rest_cli_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific response
+ 'allow': (200, {'Allow': ['GET', 'WHATEVER']}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, data=None):
+ ''' save arguments '''
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ self.xml_out = xml
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_rest_cli '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': False,
+ 'command': 'volume',
+ 'verb': 'GET',
+ 'params': {'fields': 'size,percent_used'}
+ }
+
+ def get_cli_mock_object(self):
+ # For rest, mocking is achieved through side_effect
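+ # each REST test patches OntapRestAPI.send_request and feeds it the SRR entries in order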
+ return rest_cli_module()
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ rest_cli_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_cli(self, mock_request):
+ data = dict(self.mock_args())
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cli_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_cli_options(self, mock_request):
+ data = dict(self.mock_args())
+ data['verb'] = 'OPTIONS'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['allow'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cli_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ assert 'Allow' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py
new file mode 100644
index 00000000..a7b400f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py
@@ -0,0 +1,543 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' Unit Tests NetApp ONTAP REST APIs Ansible module: na_ontap_rest_info '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_rest_info \
+ import NetAppONTAPGatherInfo as ontap_rest_info_module
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'validate_ontap_version_pass': (200, {'version': 'ontap_version'}, None),
+ 'validate_ontap_version_fail': (200, None, 'API not found error'),
+ 'get_subset_info': (200,
+ {'_links': {'self': {'href': 'dummy_href'}},
+ 'num_records': 3,
+ 'records': [{'name': 'dummy_vol1'},
+ {'name': 'dummy_vol2'},
+ {'name': 'dummy_vol3'}],
+ 'version': 'ontap_version'}, None),
+ 'get_subset_info_with_next': (200,
+ {'_links': {'self': {'href': 'dummy_href'},
+ 'next': {'href': '/api/next_record_api'}},
+ 'num_records': 3,
+ 'records': [{'name': 'dummy_vol1'},
+ {'name': 'dummy_vol2'},
+ {'name': 'dummy_vol3'}],
+ 'version': 'ontap_version'}, None),
+ 'get_next_record': (200,
+ {'_links': {'self': {'href': 'dummy_href'}},
+ 'num_records': 2,
+ 'records': [{'name': 'dummy_vol1'},
+ {'name': 'dummy_vol2'}],
+ 'version': 'ontap_version'}, None),
+ 'metrocluster_post': (200,
+ {'job': {
+ 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7',
+ '_links': {
+ 'self': {
+ 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}}
+ }},
+ None),
+ 'metrocluster_return': (200,
+ {"_links": {
+ "self": {
+ "href": "/api/cluster/metrocluster/diagnostics"
+ }
+ }, "aggregate": {
+ "state": "ok",
+ "summary": {
+ "message": ""
+ }, "timestamp": "2020-07-22T16:42:51-07:00"
+ }}, None),
+ 'job': (200,
+ {
+ "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14",
+ "description": "POST /api/cluster/metrocluster",
+ "state": "success",
+ "message": "There are not enough disks in Pool1.",
+ "code": 2432836,
+ "start_time": "2020-02-26T10:35:44-08:00",
+ "end_time": "2020-02-26T10:47:38-08:00",
+ "_links": {
+ "self": {
+ "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14"
+ }
+ }
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' A group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False
+ })
+
+ def set_args_run_ontap_version_check(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'gather_subset': ['volume_info']
+ })
+
+ def set_args_run_metrocluster_diag(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'gather_subset': ['cluster/metrocluster/diagnostics']
+ })
+
+ def set_args_run_ontap_gather_facts_for_vserver_info(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'gather_subset': ['vserver_info']
+ })
+
+ def set_args_run_ontap_gather_facts_for_volume_info(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'gather_subset': ['volume_info']
+ })
+
+ def set_args_run_ontap_gather_facts_for_all_subsets(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'gather_subset': ['all']
+ })
+
+ def set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'fields': '*',
+ 'gather_subset': ['all']
+ })
+
+ def set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'fields': ['uuid', 'name', 'node'],
+ 'gather_subset': ['all']
+ })
+
+ def set_args_run_ontap_gather_facts_for_aggregate_info_with_fields_section_pass(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'fields': ['uuid', 'name', 'node'],
+ 'validate_certs': False,
+ 'max_records': 1024,
+ 'gather_subset': ['aggregate_info']
+ })
+
+ def set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass(self):
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'https': True,
+ 'validate_certs': False,
+ 'max_records': 3,
+ 'gather_subset': ['volume_info']
+ })
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_version_check_for_9_6_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_version_check())
+ my_obj = ontap_rest_info_module()
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_version_check_for_10_2_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_version_check())
+ my_obj = ontap_rest_info_module()
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_version_check_for_9_2_fail(self, mock_request):
+ ''' Test for Checking the ONTAP version '''
+ set_module_args(self.set_args_run_ontap_version_check())
+ my_obj = ontap_rest_info_module()
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_fail'],
+ ]
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['msg'] == SRR['validate_ontap_version_fail'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_metrocluster_pass(self, mock_request):
+ set_module_args(self.set_args_run_metrocluster_diag())
+ my_obj = ontap_rest_info_module()
+ gather_subset = ['cluster/metrocluster/diagnostics']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['metrocluster_post'],
+ SRR['job'],
+ SRR['metrocluster_return']
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_run_metrocluster_pass: %s' % repr(exc.value.args))
+ assert set(exc.value.args[0]['ontap_info']) == set(gather_subset)
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_gather_facts_for_vserver_info_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_gather_facts_for_vserver_info())
+ my_obj = ontap_rest_info_module()
+ gather_subset = ['svm/svms']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_run_ontap_gather_facts_for_vserver_info_pass: %s' % repr(exc.value.args))
+ assert set(exc.value.args[0]['ontap_info']) == set(gather_subset)
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_gather_facts_for_volume_info_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_gather_facts_for_volume_info())
+ my_obj = ontap_rest_info_module()
+ gather_subset = ['storage/volumes']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_run_ontap_gather_facts_for_volume_info_pass: %s' % repr(exc.value.args))
+ assert set(exc.value.args[0]['ontap_info']) == set(gather_subset)
+
+ # Super important: Metrocluster doesn't call get_subset_info and makes 3 API calls instead of 1,
+ # so its canned responses need to be in the correct place. The module returns the keys in a sorted list.
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_gather_facts_for_all_subsets_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_gather_facts_for_all_subsets())
+ my_obj = ontap_rest_info_module()
+ gather_subset = ['application/applications', 'application/templates', 'cloud/targets', 'cluster/chassis', 'cluster/jobs',
+ 'cluster/metrocluster/diagnostics', 'cluster/metrics', 'cluster/nodes', 'cluster/peers', 'cluster/schedules',
+ 'cluster/software', 'cluster/software/download', 'cluster/software/history', 'cluster/software/packages',
+ 'name-services/dns', 'name-services/ldap', 'name-services/name-mappings', 'name-services/nis',
+ 'network/ethernet/broadcast-domains', 'network/ethernet/ports', 'network/fc/logins', 'network/fc/wwpn-aliases',
+ 'network/ip/interfaces', 'network/ip/routes', 'network/ip/service-policies', 'network/ipspaces',
+ 'protocols/cifs/home-directory/search-paths', 'protocols/cifs/services', 'protocols/cifs/shares',
+ 'protocols/san/fcp/services', 'protocols/san/igroups', 'protocols/san/iscsi/credentials',
+ 'protocols/san/iscsi/services', 'protocols/san/lun-maps', 'security/accounts', 'security/roles', 'storage/aggregates',
+ 'storage/disks', 'storage/flexcache/flexcaches', 'storage/flexcache/origins', 'storage/luns', 'storage/namespaces',
+ 'storage/ports', 'storage/qos/policies', 'storage/qtrees', 'storage/quota/reports', 'storage/quota/rules',
+ 'storage/shelves', 'storage/snapshot-policies', 'storage/volumes', 'support/autosupport', 'support/autosupport/messages',
+ 'support/ems', 'support/ems/destinations', 'support/ems/events', 'support/ems/filters', 'svm/peers', 'svm/peer-permissions',
+ 'svm/svms']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
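+ # cluster/metrocluster/diagnostics consumes three responses here: POST, job status, then the diagnostics result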
+ SRR['metrocluster_post'],
+ SRR['job'],
+ SRR['metrocluster_return'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_run_ontap_gather_facts_for_all_subsets_pass: %s' % repr(exc.value.args))
+ assert set(exc.value.args[0]['ontap_info']) == set(gather_subset)
+
+ # Super important: Metrocluster doesn't call get_subset_info and makes 3 API calls instead of 1,
+ # so its canned responses need to be in the correct place. The module returns the keys in a sorted list.
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass())
+ my_obj = ontap_rest_info_module()
+ gather_subset = ['application/applications', 'application/templates', 'cloud/targets', 'cluster/chassis', 'cluster/jobs',
+ 'cluster/metrocluster/diagnostics', 'cluster/metrics', 'cluster/nodes', 'cluster/peers', 'cluster/schedules',
+ 'cluster/software', 'cluster/software/download', 'cluster/software/history', 'cluster/software/packages',
+ 'name-services/dns', 'name-services/ldap', 'name-services/name-mappings', 'name-services/nis',
+ 'network/ethernet/broadcast-domains', 'network/ethernet/ports', 'network/fc/logins', 'network/fc/wwpn-aliases',
+ 'network/ip/interfaces', 'network/ip/routes', 'network/ip/service-policies', 'network/ipspaces',
+ 'protocols/cifs/home-directory/search-paths', 'protocols/cifs/services', 'protocols/cifs/shares',
+ 'protocols/san/fcp/services', 'protocols/san/igroups', 'protocols/san/iscsi/credentials',
+ 'protocols/san/iscsi/services', 'protocols/san/lun-maps', 'security/accounts', 'security/roles', 'storage/aggregates',
+ 'storage/disks', 'storage/flexcache/flexcaches', 'storage/flexcache/origins', 'storage/luns', 'storage/namespaces',
+ 'storage/ports', 'storage/qos/policies', 'storage/qtrees', 'storage/quota/reports', 'storage/quota/rules',
+ 'storage/shelves', 'storage/snapshot-policies', 'storage/volumes', 'support/autosupport', 'support/autosupport/messages',
+ 'support/ems', 'support/ems/destinations', 'support/ems/events', 'support/ems/filters', 'svm/peers', 'svm/peer-permissions',
+ 'svm/svms']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['metrocluster_post'],
+ SRR['job'],
+ SRR['metrocluster_return'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+            print('Info: test_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass: %s' % repr(exc.value.args))
+ assert set(exc.value.args[0]['ontap_info']) == set(gather_subset)
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail(self, mock_request):
+ set_module_args(self.set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail())
+ my_obj = ontap_rest_info_module()
+ error_message = "Error: fields: %s, only one subset will be allowed." \
+ % self.set_args_run_ontap_gather_facts_for_aggregate_info_with_fields_section_pass()['fields']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+            print('Info: test_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail: %s' % repr(exc.value.args))
+ assert exc.value.args[0]['msg'] == error_message
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_run_ontap_gather_facts_for_aggregate_info_pass_with_fields_section_pass(self, mock_request):
+ set_module_args(self.set_args_run_ontap_gather_facts_for_aggregate_info_with_fields_section_pass())
+ my_obj = ontap_rest_info_module()
+ gather_subset = ['storage/aggregates']
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+            print('Info: test_run_ontap_gather_facts_for_aggregate_info_pass_with_fields_section_pass: %s' % repr(exc.value.args))
+ assert set(exc.value.args[0]['ontap_info']) == set(gather_subset)
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass(self, mock_request):
+ set_module_args(self.set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass())
+ my_obj = ontap_rest_info_module()
+ total_records = 5
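+        # the first canned response advertises a 'next' link, so the module is expected to issue a
+        # follow-up request (get_next_record) and report the combined number of records.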
+ mock_request.side_effect = [
+ SRR['validate_ontap_version_pass'],
+ SRR['get_subset_info_with_next'],
+ SRR['get_next_record'],
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass: %s' % repr(exc.value.args))
+ assert exc.value.args[0]['ontap_info']['storage/volumes']['num_records'] == total_records
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py
new file mode 100644
index 00000000..6e298145
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py
@@ -0,0 +1,435 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for Ansible module: na_ontap_security_certificates """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_certificates \
+ import NetAppOntapSecurityCertificates as my_module # module under test
+
+
+# REST API canned responses when mocking send_request
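+# each canned entry is a (status_code, response, error) tuple, as consumed by the mocked send_request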
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'empty_records': (200, {'records': []}, None),
+ 'get_uuid': (200, {'records': [{'uuid': 'ansible'}]}, None),
+ 'error_unexpected_name': (200, None, {'message': 'Unexpected argument "name".'})
+}
+
+NAME_ERROR = "Error calling API: security/certificates - ONTAP 9.6 and 9.7 do not support 'name'. Use 'common_name' and 'type' as a work-around."
+TYPE_ERROR = "Error calling API: security/certificates - When using 'common_name', 'type' is required."
+EXPECTED_ERROR = "Error calling API: security/certificates - Expected error"
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+def set_default_args():
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'name_for_certificate'
+ })
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+def test_module_fail_when_required_args_missing(mock_fail):
+ ''' required arguments are reported as errors '''
+ mock_fail.side_effect = fail_json
+ set_module_args({})
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_get_certificate_called(mock_request, mock_fail):
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(set_default_args())
+ my_obj = my_module()
+ assert my_obj.get_certificate() is not None
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_error(mock_request, mock_fail):
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(set_default_args())
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['msg'] == EXPECTED_ERROR
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_create_failed(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_records'], # get certificate -> not found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'type': 'client_ca',
+ 'vserver': 'abc',
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = 'Error creating or installing certificate: one or more of the following options are missing:'
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_successful_create(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_records'], # get certificate -> not found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'type': 'client_ca',
+ 'vserver': 'abc',
+ 'common_name': 'cname'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_idempotent_create(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'], # get certificate -> found
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'type': 'client_ca',
+ 'vserver': 'abc',
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_successful_delete(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'], # get certificate -> found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'state': 'absent',
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_idempotent_delete(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_records'], # get certificate -> not found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'state': 'absent',
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_successful_sign(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'], # get certificate -> found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_failed_sign_missing_ca(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_records'], # get certificate -> not found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "signing certificate with name '%s' not found on svm: %s" % (data['name'], data['vserver'])
+ assert exc.value.args[0]['msg'] == msg
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_failed_sign_absent(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'], # get certificate -> found
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR',
+ 'state': 'absent'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "'signing_request' is not supported with 'state' set to 'absent'"
+ assert exc.value.args[0]['msg'] == msg
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_failed_on_name(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['error_unexpected_name'], # get certificate -> error
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR',
+ 'state': 'absent',
+ 'ignore_name_if_not_supported': False,
+ 'common_name': 'common_name',
+ 'type': 'root_ca'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['msg'] == NAME_ERROR
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_cannot_ignore_name_error_no_common_name(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['error_unexpected_name'], # get certificate -> error
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR',
+ 'state': 'absent',
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['msg'] == NAME_ERROR
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_cannot_ignore_name_error_no_type(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['error_unexpected_name'], # get certificate -> error
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR',
+ 'state': 'absent',
+ 'common_name': 'common_name'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['msg'] == TYPE_ERROR
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_ignore_name_error(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['error_unexpected_name'], # get certificate -> error
+ SRR['get_uuid'], # get certificate -> found
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'vserver': 'abc',
+ 'signing_request': 'CSR',
+ 'state': 'absent',
+ 'common_name': 'common_name',
+ 'type': 'root_ca'
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ msg = "'signing_request' is not supported with 'state' set to 'absent'"
+ assert exc.value.args[0]['msg'] == msg
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_rest_successful_create_name_error(mock_request, mock_fail, mock_exit):
+ mock_exit.side_effect = exit_json
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['error_unexpected_name'], # get certificate -> error
+ SRR['empty_records'], # get certificate -> not found
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'common_name': 'cname',
+ 'type': 'client_ca',
+ 'vserver': 'abc',
+ }
+ data.update(set_default_args())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ print(mock_request.mock_calls)
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py
new file mode 100644
index 00000000..3f5e6a41
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py
@@ -0,0 +1,174 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_security_key_manager '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_key_manager \
+ import NetAppOntapSecurityKeyManager as key_manager_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'key_manager':
+ xml = self.build_port_info(self.data)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_port_info(key_manager_details):
+        ''' build xml data for key-manager-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'key-manager-info': {
+ 'key-manager-ip-address': '0.0.0.0',
+ 'key-manager-server-status': 'available',
+ 'key-manager-tcp-port': '5696',
+ 'node-name': 'test_node'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_key_manager = {
+ 'node_name': 'test_node',
+ 'tcp_port': 5696,
+ 'ip_address': '0.0.0.0',
+ 'server_status': 'available'
+ }
+
+ def mock_args(self):
+ return {
+ 'node': self.mock_key_manager['node_name'],
+ 'tcp_port': self.mock_key_manager['tcp_port'],
+ 'ip_address': self.mock_key_manager['ip_address'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+
+ def get_key_manager_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_security_key_manager object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_security_key_manager object
+ """
+ obj = key_manager_module()
+ obj.asup_log_for_cserver = Mock(return_value=None)
+ obj.cluster = Mock()
+ obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ obj.cluster = MockONTAPConnection()
+ else:
+ obj.cluster = MockONTAPConnection(kind=kind, data=self.mock_key_manager)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ key_manager_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_key_manager(self):
+ ''' Test if get_key_manager() returns None for non-existent key manager '''
+ set_module_args(self.mock_args())
+ result = self.get_key_manager_mock_object().get_key_manager()
+ assert result is None
+
+ def test_get_existing_key_manager(self):
+ ''' Test if get_key_manager() returns details for existing key manager '''
+ set_module_args(self.mock_args())
+ result = self.get_key_manager_mock_object('key_manager').get_key_manager()
+ assert result['ip_address'] == self.mock_key_manager['ip_address']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_key_manager.NetAppOntapSecurityKeyManager.get_key_manager')
+ def test_successfully_add_key_manager(self, get_key_manager):
+        ''' Test successfully adding a key manager '''
+ data = self.mock_args()
+ data['state'] = 'present'
+ set_module_args(data)
+ get_key_manager.side_effect = [
+ None
+ ]
+ obj = self.get_key_manager_mock_object('key_manager')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successfully_delete_key_manager(self):
+        ''' Test successfully deleting a key manager '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ obj = self.get_key_manager_mock_object('key_manager')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py
new file mode 100644
index 00000000..ed3596b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py
@@ -0,0 +1,234 @@
+''' unit tests for ONTAP Ansible module: na_ontap_service_processor_network '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+import time
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_service_processor_network \
+ import NetAppOntapServiceProcessorNetwork as sp_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'sp-enabled':
+ xml = self.build_sp_info(self.data)
+ elif self.kind == 'sp-disabled':
+ xml = self.build_sp_disabled_info(self.data)
+ else:
+ xml = self.build_info()
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_sp_info(sp):
+        ''' build xml data for an enabled service-processor-network-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list':
+ {
+ 'service-processor-network-info':
+ {
+ 'node-name': sp['node'],
+ 'is-enabled': 'true',
+ 'address-type': sp['address_type'],
+ 'dhcp': 'v4',
+ 'gateway-ip-address': sp['gateway_ip_address'],
+ 'netmask': sp['netmask'],
+ 'ip-address': sp['ip_address'],
+ 'setup-status': 'succeeded',
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_info():
+        ''' build xml data with no matching records '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 0
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_sp_disabled_info(sp):
+        ''' build xml data for a disabled service-processor-network-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list':
+ {
+ 'service-processor-network-info':
+ {
+ 'node-name': sp['node'],
+ 'is-enabled': 'false',
+ 'address-type': sp['address_type'],
+ 'setup-status': 'not_setup',
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_sp = {
+ 'node': 'test-vsim1',
+ 'gateway_ip_address': '2.2.2.2',
+ 'address_type': 'ipv4',
+ 'ip_address': '1.1.1.1',
+ 'netmask': '255.255.248.0',
+ 'dhcp': 'v4'
+ }
+
+ def mock_args(self, enable=True):
+ data = {
+ 'node': self.mock_sp['node'],
+ 'is_enabled': enable,
+ 'address_type': self.mock_sp['address_type'],
+ 'hostname': 'host',
+ 'username': 'admin',
+ 'password': 'password',
+ }
+ if enable is True:
+ data['ip_address'] = self.mock_sp['ip_address']
+ data['gateway_ip_address'] = self.mock_sp['gateway_ip_address']
+ data['netmask'] = self.mock_sp['netmask']
+ data['dhcp'] = 'v4'
+ return data
+
+ def get_sp_mock_object(self, kind=None):
+ """
+        Helper method to return an na_ontap_service_processor_network object
+        :param kind: passes this param to MockONTAPConnection()
+        :return: na_ontap_service_processor_network object
+ """
+ sp_obj = sp_module()
+ sp_obj.autosupport_log = Mock(return_value=None)
+ if kind is None:
+ sp_obj.server = MockONTAPConnection()
+ else:
+ sp_obj.server = MockONTAPConnection(kind=kind, data=self.mock_sp)
+ return sp_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ sp_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_modify_error_on_disabled_sp(self):
+        ''' modifying a disabled service processor network must fail '''
+ data = self.mock_args(enable=False)
+ data['ip_address'] = self.mock_sp['ip_address']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_sp_mock_object('sp-disabled').apply()
+ assert 'Error: Cannot modify a service processor network if it is disabled.' in \
+ exc.value.args[0]['msg']
+
+ def test_modify_sp(self):
+        ''' modify the IP address of an enabled service processor network '''
+ data = self.mock_args()
+ data['ip_address'] = '3.3.3.3'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sp_mock_object('sp-enabled').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_sp_wait(self):
+        ''' modify the service processor network and wait for completion '''
+ data = self.mock_args()
+ data['ip_address'] = '3.3.3.3'
+ data['wait_for_completion'] = True
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sp_mock_object('sp-enabled').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_service_processor_network.NetAppOntapServiceProcessorNetwork.'
+ 'get_service_processor_network')
+ def test_non_existing_sp(self, get_sp):
+ set_module_args(self.mock_args())
+ get_sp.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_sp_mock_object().apply()
+ assert 'Error No Service Processor for node: test-vsim1' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_service_processor_network.NetAppOntapServiceProcessorNetwork.'
+ 'get_sp_network_status')
+ @patch('time.sleep')
+ def test_wait_on_sp_status(self, get_sp, sleep):
+ data = self.mock_args()
+ data['gateway_ip_address'] = '4.4.4.4'
+ data['wait_for_completion'] = True
+ set_module_args(data)
+ get_sp.side_effect = ['in_progress', 'done']
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_sp_mock_object('sp-enabled').apply()
+ sleep.assert_called_once_with()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py
new file mode 100644
index 00000000..b568e218
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py
@@ -0,0 +1,630 @@
+''' unit tests for ONTAP Ansible module: na_ontap_snapmirror '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror \
+ import NetAppONTAPSnapmirror as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm=None, status=None, quiesce_status='passed'):
+ ''' save arguments '''
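+        # parm holds the mirror-state, status the relationship-status returned by the fake ZAPI call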
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+ self.parm = parm
+ self.status = status
+ self.quiesce_status = quiesce_status
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'snapmirror':
+ xml = self.build_snapmirror_info(self.parm, self.status, self.quiesce_status)
+ elif self.type == 'snapmirror_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_snapmirror_info(mirror_state, status, quiesce_status):
+ ''' build xml data for snapmirror-entry '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'status': quiesce_status,
+ 'attributes-list': {'snapmirror-info': {'mirror-state': mirror_state, 'schedule': None,
+ 'source-location': 'ansible:ansible',
+ 'relationship-status': status, 'policy': 'ansible',
+ 'relationship-type': 'data_protection',
+ 'max-transfer-rate': 1000,
+ 'identity-preserve': 'true'},
+ 'snapmirror-destination-info': {'destination-location': 'ansible'}}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.source_server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ source_path = 'ansible:ansible'
+ destination_path = 'ansible:ansible'
+ policy = 'ansible'
+ source_vserver = 'ansible'
+ destination_vserver = 'ansible'
+ relationship_type = 'data_protection'
+ schedule = None
+ source_username = 'admin'
+ source_password = 'password'
+ relationship_state = 'active'
+ update = True
+ else:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'password'
+ source_path = 'ansible:ansible'
+ destination_path = 'ansible:ansible'
+ policy = 'ansible'
+ source_vserver = 'ansible'
+ destination_vserver = 'ansible'
+ relationship_type = 'data_protection'
+ schedule = None
+ source_username = 'admin'
+ source_password = 'password'
+ relationship_state = 'active'
+ update = True
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'source_path': source_path,
+ 'destination_path': destination_path,
+ 'policy': policy,
+ 'source_vserver': source_vserver,
+ 'destination_vserver': destination_vserver,
+ 'relationship_type': relationship_type,
+ 'schedule': schedule,
+ 'source_username': source_username,
+ 'source_password': source_password,
+ 'relationship_state': relationship_state,
+ 'update': update
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test snapmirror_get for non-existent snapmirror'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.snapmirror_get is not None
+
+ def test_ensure_get_called_existing(self):
+ ''' test snapmirror_get for existing snapmirror'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='snapmirror', status='idle')
+ assert my_obj.snapmirror_get()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_create')
+ def test_successful_create(self, snapmirror_create):
+ ''' creating snapmirror and testing idempotency '''
+ data = self.set_default_args()
+ data['schedule'] = 'abc'
+ data['identity_preserve'] = True
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args()
+ data['update'] = False
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', 'snapmirrored', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_failure_break(self):
+ ''' breaking snapmirror to test quiesce time-delay failure '''
+ data = self.set_default_args()
+ data['relationship_state'] = 'broken'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', 'snapmirrored', status='idle', quiesce_status='InProgress')
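+        # quiesce never reaches 'quiesced', so the module is expected to give up and fail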
+ with pytest.raises(AnsibleFailJson) as exc:
+ # replace time.sleep with a noop
+ with patch('time.sleep', lambda a: None):
+ my_obj.apply()
+ assert 'Taking a long time to Quiescing SnapMirror, try again later' in exc.value.args[0]['msg']
+
+ def test_successful_break(self):
+ ''' breaking snapmirror and testing idempotency '''
+ data = self.set_default_args()
+ data['relationship_state'] = 'broken'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', 'snapmirrored', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ # to reset na_helper from remembering the previous 'changed' value
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', 'broken-off', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_create_without_initialize(self):
+        ''' creating snapmirror with initialize disabled '''
+ data = self.set_default_args()
+ data['schedule'] = 'abc'
+ data['identity_preserve'] = True
+ data['initialize'] = False
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.server = MockONTAPConnection('snapmirror', 'Uninitialized', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_create')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.check_elementsw_parameters')
+ def test_successful_element_ontap_create(self, check_param, snapmirror_create):
+ ''' creating ElementSW to ONTAP snapmirror '''
+ data = self.set_default_args()
+ data['schedule'] = 'abc'
+ data['connection_type'] = 'elementsw_ontap'
+ data['source_hostname'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create.assert_called_with()
+ check_param.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_create')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.check_elementsw_parameters')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_get')
+ def test_successful_ontap_element_create(self, snapmirror_get, check_param, snapmirror_create):
+ ''' creating ONTAP to ElementSW snapmirror '''
+ data = self.set_default_args()
+ data['schedule'] = 'abc'
+ data['connection_type'] = 'ontap_elementsw'
+ data['source_hostname'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ snapmirror_get.side_effect = [
+ Mock(),
+ None
+ ]
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create.assert_called_with()
+ check_param.assert_called_with('destination')
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.delete_snapmirror')
+ def test_successful_delete(self, delete_snapmirror):
+ ''' deleting snapmirror and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ data['source_hostname'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_destination = Mock(return_value=True)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_snapmirror.assert_called_with(False, 'data_protection', None)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete_error_check(self):
+        ''' deleting a snapmirror requires the source cluster hostname '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ assert 'Missing parameters for delete:' in exc.value.args[0]['msg']
+
+ def test_successful_delete_check_get_destination(self):
+        ''' get_destination() finds the destination when the source hostname is provided '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ data['source_hostname'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle')
+ my_obj.source_server = MockONTAPConnection('snapmirror', status='idle')
+ res = my_obj.get_destination()
+ assert res is True
+
+ def test_snapmirror_release(self):
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.source_server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ my_obj.snapmirror_release()
+ assert my_obj.source_server.xml_in['destination-location'] == data['destination_path']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_resume')
+ def test_snapmirror_resume(self, snapmirror_resume):
+ ''' resuming snapmirror '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='quiesced', parm='snapmirrored')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_resume.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_restore')
+ def test_snapmirror_restore(self, snapmirror_restore):
+ ''' restore snapmirror '''
+ data = self.set_default_args()
+ data['relationship_type'] = 'restore'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_restore.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_abort')
+ def test_successful_abort(self, snapmirror_abort):
+        ''' aborting an in-progress snapmirror transfer and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ data['source_hostname'] = '10.10.10.10'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='transferring')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_abort.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_modify')
+ def test_successful_modify(self, snapmirror_modify):
+ ''' modifying snapmirror and testing idempotency '''
+ data = self.set_default_args()
+ data['policy'] = 'ansible2'
+ data['schedule'] = 'abc2'
+ data['max_transfer_rate'] = 2000
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_modify.assert_called_with({'policy': 'ansible2', 'schedule': 'abc2', 'max_transfer_rate': 2000})
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args()
+ data['update'] = False
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.snapmirror_initialize')
+ def test_successful_initialize(self, snapmirror_initialize):
+        ''' initializing snapmirror and testing idempotency '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='uninitialized')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_initialize.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args()
+ data['update'] = False
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_update(self):
+        ''' updating snapmirror and testing idempotency '''
+ data = self.set_default_args()
+ data['policy'] = 'ansible2'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_elementsw_volume_exists(self):
+        ''' test check_if_elementsw_volume_exists '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ mock_helper = Mock()
+ mock_helper.volume_id_exists.side_effect = [1000, None]
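+        # the first lookup finds the volume id, the second returns None and must raise an error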
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ my_obj.check_if_elementsw_volume_exists('10.10.10.10:/lun/1000', mock_helper)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.check_if_elementsw_volume_exists('10.10.10.10:/lun/1000', mock_helper)
+ assert 'Error: Source volume does not exist in the ElementSW cluster' in exc.value.args[0]['msg']
+
+ def test_elementsw_svip_exists(self):
+        ''' test validate_elementsw_svip '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ mock_helper = Mock()
+ mock_helper.get_cluster_info.return_value.cluster_info.svip = '10.10.10.10'
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ my_obj.validate_elementsw_svip('10.10.10.10:/lun/1000', mock_helper)
+
+ def test_elementsw_svip_exists_negative(self):
+        ''' negative test for validate_elementsw_svip '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ mock_helper = Mock()
+ mock_helper.get_cluster_info.return_value.cluster_info.svip = '10.10.10.10'
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.validate_elementsw_svip('10.10.10.11:/lun/1000', mock_helper)
+ assert 'Error: Invalid SVIP' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.set_element_connection')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.validate_elementsw_svip')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.check_if_elementsw_volume_exists')
+ def test_check_elementsw_params_source(self, validate_volume, validate_svip, connection):
+ ''' check elementsw parameters for source '''
+ data = self.set_default_args()
+ data['source_path'] = '10.10.10.10:/lun/1000'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ mock_elem, mock_helper = Mock(), Mock()
+ connection.return_value = mock_helper, mock_elem
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ my_obj.check_elementsw_parameters('source')
+ connection.called_once_with('source')
+ validate_svip.called_once_with(data['source_path'], mock_elem)
+ validate_volume.called_once_with(data['source_path'], mock_helper)
+
+ def test_check_elementsw_params_negative(self):
+        ''' negative test: source_path is missing when checking elementsw parameters '''
+ data = self.set_default_args()
+ del data['source_path']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.check_elementsw_parameters('source')
+ assert 'Error: Missing required parameter source_path' in exc.value.args[0]['msg']
+
+ def test_check_elementsw_params_invalid(self):
+        ''' negative test: source_path is not a valid ElementSW path '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.check_elementsw_parameters('source')
+ assert 'Error: invalid source_path' in exc.value.args[0]['msg']
+
+ def test_elementsw_source_path_format(self):
+ ''' test element_source_path_format_matches '''
+ data = self.set_default_args()
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ match = my_obj.element_source_path_format_matches('1.1.1.1:dummy')
+ assert match is None
+ match = my_obj.element_source_path_format_matches('10.10.10.10:/lun/10')
+ assert match is not None
+
+ def test_remote_volume_exists(self):
+ ''' test check_if_remote_volume_exists '''
+ data = self.set_default_args()
+ data['source_volume'] = 'test_vol'
+ data['destination_volume'] = 'test_vol2'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.set_source_cluster_connection = Mock(return_value=None)
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ my_obj.source_server = MockONTAPConnection('snapmirror', status='idle', parm='snapmirrored')
+ res = my_obj.check_if_remote_volume_exists()
+ assert res
+
+ def test_if_all_methods_catch_exception(self):
+ data = self.set_default_args()
+ data['source_hostname'] = '10.10.10.10'
+ data['source_volume'] = 'ansible'
+ data['destination_volume'] = 'ansible2'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_get()
+ assert 'Error fetching snapmirror info: ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_abort()
+ assert 'Error aborting SnapMirror relationship :' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_quiesce = Mock(return_value=None)
+ my_obj.snapmirror_break()
+ assert 'Error breaking SnapMirror relationship :' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_get = Mock(return_value={'mirror_state': 'transferring'})
+ my_obj.snapmirror_initialize()
+ assert 'Error initializing SnapMirror :' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_update()
+ assert 'Error updating SnapMirror :' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.set_source_cluster_connection = Mock(return_value=True)
+ my_obj.source_server = MockONTAPConnection('snapmirror_fail')
+ my_obj.check_if_remote_volume_exists()
+ assert 'Error fetching source volume details' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.check_if_remote_volume_exists = Mock(return_value=True)
+ my_obj.source_server = MockONTAPConnection()
+ my_obj.snapmirror_create()
+ assert 'Error creating SnapMirror ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_quiesce = Mock(return_value=None)
+ my_obj.get_destination = Mock(return_value=None)
+ my_obj.snapmirror_break = Mock(return_value=None)
+ my_obj.delete_snapmirror(False, 'data_protection', None)
+ assert 'Error deleting SnapMirror :' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.snapmirror_modify({'policy': 'ansible2', 'schedule': 'abc2'})
+ assert 'Error modifying SnapMirror schedule or policy :' in exc.value.args[0]['msg']
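The called_once_with checks in test_check_elementsw_params_source rely on a subtle unittest.mock behaviour: only the assert_* methods actually verify anything, while any other attribute access just spawns a child mock. A minimal, self-contained sketch of the difference (the mock name do_work is illustrative only):

    from unittest.mock import Mock

    helper = Mock()
    helper.do_work('source')
    helper.do_work.called_once_with('dest')           # silently passes: only creates a child mock
    helper.do_work.assert_called_once_with('source')  # real assertion: raises AssertionError on a mismatch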
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py
new file mode 100644
index 00000000..2b62e4dc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py
@@ -0,0 +1,717 @@
+''' unit tests ONTAP Ansible module: na_ontap_snapmirror_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy \
+ import NetAppOntapSnapMirrorPolicy as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': ({}, None),
+ 'end_of_sequence': (None, "Unexpected call to send_request"),
+ 'generic_error': (None, "Expected error"),
+ # module specific responses
+ 'get_snapmirror_policy': {'vserver': 'ansible',
+ 'policy_name': 'ansible',
+ 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890',
+ 'comment': 'created by ansible',
+ 'policy_type': 'async_mirror',
+ 'snapmirror_label': [],
+ 'keep': [],
+ 'schedule': [],
+ 'prefix': []},
+ 'get_snapmirror_policy_with_rules': {'vserver': 'ansible',
+ 'policy_name': 'ansible',
+ 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890',
+ 'comment': 'created by ansible',
+ 'policy_type': 'async_mirror',
+ 'snapmirror_label': ['daily', 'weekly', 'monthly'],
+ 'keep': [7, 5, 12],
+ 'schedule': ['', 'weekly', 'monthly'],
+ 'prefix': ['', 'weekly', 'monthly']}
+}
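+# Note: in this file the module-specific entries above are fed straight into a mocked
+# get_snapmirror_policy (via Mock return_value / side_effect); send_request itself is
+# not patched in these tests.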
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+ self.parm = parm
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'snapmirror_policy':
+ xml = self.build_snapmirror_policy_info(self.parm)
+ elif self.type == 'snapmirror_policy_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_snapmirror_policy_info(mirror_state):
+ ''' build xml data for snapmirror_policy-entry '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'snapmirror-policy-info': {'comment': 'created by ansible',
+ 'policy-name': 'ansible',
+ 'type': 'async_mirror',
+ 'tries': '8',
+ 'transfer-priority': 'normal',
+ 'restart': 'always',
+ 'is-network-compression-enabled': False,
+ 'ignore-atime': False,
+ 'vserver-name': 'ansible'}}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.source_server = MockONTAPConnection()
+ self.onbox = False
+
+    def set_default_args(self, use_rest=None, with_rules=False):
+        hostname = '10.10.10.10'
+        username = 'admin'
+        password = 'password'
+        vserver = 'ansible'
+        policy_name = 'ansible'
+        policy_type = 'async_mirror'
+        comment = 'created by ansible'
+        snapmirror_label = ['daily', 'weekly', 'monthly']
+        keep = [7, 5, 12]
+        schedule = ['', 'weekly', 'monthly']
+        prefix = ['', 'weekly', 'monthly']
+
+ args = dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'vserver': vserver,
+ 'policy_name': policy_name,
+ 'policy_type': policy_type,
+ 'comment': comment
+ })
+
+ if with_rules:
+ args['snapmirror_label'] = snapmirror_label
+ args['keep'] = keep
+ args['schedule'] = schedule
+ args['prefix'] = prefix
+
+ if use_rest is not None:
+ args['use_rest'] = use_rest
+
+ return args
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_snapmirror_policy for non-existent snapmirror policy'''
+ set_module_args(self.set_default_args(use_rest='Never'))
+ my_obj = my_module()
+ my_obj.server = self.server
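+        # without parentheses the assert below only verifies the method exists; it does not invoke get_snapmirror_policy()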
+ assert my_obj.get_snapmirror_policy is not None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_snapmirror_policy for existing snapmirror policy'''
+ set_module_args(self.set_default_args(use_rest='Never'))
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='snapmirror_policy')
+ assert my_obj.get_snapmirror_policy()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.create_snapmirror_policy')
+ def test_successful_create(self, snapmirror_create_policy):
+ ''' creating snapmirror policy without rules and testing idempotency '''
+ data = self.set_default_args(use_rest='Never')
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create_policy.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Never')
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.create_snapmirror_policy')
+ def test_successful_create_with_rest(self, snapmirror_create_policy):
+ ''' creating snapmirror policy without rules via REST and testing idempotency '''
+ data = self.set_default_args()
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock()
+ my_obj.get_snapmirror_policy.side_effect = [None, SRR['get_snapmirror_policy']]
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create_policy.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Never')
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.create_snapmirror_policy')
+ def test_successful_create_with_rules(self, snapmirror_create_policy):
+ ''' creating snapmirror policy with rules and testing idempotency '''
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create_policy.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy_with_rules'])
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.modify_snapmirror_policy_rules')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.create_snapmirror_policy')
+ def test_successful_create_with_rules_via_rest(self, snapmirror_create_policy, modify_snapmirror_policy_rules):
+ ''' creating snapmirror policy with rules via rest and testing idempotency '''
+ data = self.set_default_args(use_rest='Always', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock()
+ my_obj.get_snapmirror_policy.side_effect = [None, SRR['get_snapmirror_policy']]
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_create_policy.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Always', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy_with_rules'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.delete_snapmirror_policy')
+ def test_successful_delete(self, delete_snapmirror_policy):
+ ''' deleting snapmirror policy and testing idempotency '''
+ data = self.set_default_args(use_rest='Never')
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_snapmirror_policy.assert_called_with(None)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.delete_snapmirror_policy')
+ def test_successful_delete_with_rest(self, delete_snapmirror_policy):
+ ''' deleting snapmirror policy via REST and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_snapmirror_policy.assert_called_with('abcdef12-3456-7890-abcd-ef1234567890')
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=None)
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.modify_snapmirror_policy')
+ def test_successful_modify(self, snapmirror_policy_modify):
+ ''' modifying snapmirror policy without rules and testing idempotency '''
+ data = self.set_default_args(use_rest='Never')
+ data['comment'] = 'old comment'
+ data['ignore_atime'] = True
+ data['is_network_compression_enabled'] = True
+ data['owner'] = 'cluster_admin'
+ data['restart'] = 'default'
+ data['tries'] = '7'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_policy_modify.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Never')
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.modify_snapmirror_policy')
+ def test_successful_modify_with_rest(self, snapmirror_policy_modify):
+ ''' modifying snapmirror policy without rules via REST and testing idempotency '''
+ data = self.set_default_args()
+ data['comment'] = 'old comment'
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_policy_modify.assert_called_with('abcdef12-3456-7890-abcd-ef1234567890', 'async_mirror')
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args()
+ data['use_rest'] = 'Always'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.modify_snapmirror_policy')
+ def test_successful_modify_with_rules(self, snapmirror_policy_modify):
+ ''' modifying snapmirror policy with rules and testing idempotency '''
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy'])
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_policy_modify.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy_with_rules'])
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.modify_snapmirror_policy_rules')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy.NetAppOntapSnapMirrorPolicy.modify_snapmirror_policy')
+ def test_successful_modify_with_rules_via_rest(self, snapmirror_policy_modify, modify_snapmirror_policy_rules):
+ ''' modifying snapmirror policy with rules via rest and testing idempotency '''
+ data = self.set_default_args(use_rest='Always', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ snapmirror_policy_modify.assert_called_with('abcdef12-3456-7890-abcd-ef1234567890', 'async_mirror')
+ # to reset na_helper from remembering the previous 'changed' value
+ data = self.set_default_args(use_rest='Always', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ my_obj.get_snapmirror_policy = Mock(return_value=SRR['get_snapmirror_policy_with_rules'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ data = self.set_default_args(use_rest='Never')
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapmirror_policy_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_snapmirror_policy()
+ assert 'Error getting snapmirror policy ansible:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapmirror_policy()
+ assert 'Error creating snapmirror policy ansible:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_snapmirror_policy()
+ assert 'Error deleting snapmirror policy ansible:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_snapmirror_policy()
+ assert 'Error modifying snapmirror policy ansible:' in exc.value.args[0]['msg']
+
+ def test_create_snapmirror_policy_retention_obj_for_rest(self):
+ ''' test create_snapmirror_policy_retention_obj_for_rest '''
+ data = self.set_default_args(use_rest='Never')
+ set_module_args(data)
+ my_obj = my_module()
+
+ # Test no rules
+ self.assertEqual(my_obj.create_snapmirror_policy_retention_obj_for_rest(), [])
+
+ # Test one rule
+ rules = [{'snapmirror_label': 'daily', 'keep': 7}]
+ retention_obj = [{'label': 'daily', 'count': '7'}]
+ self.assertEqual(my_obj.create_snapmirror_policy_retention_obj_for_rest(rules), retention_obj)
+
+ # Test two rules, with a prefix
+ rules = [{'snapmirror_label': 'daily', 'keep': 7},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly'}]
+ retention_obj = [{'label': 'daily', 'count': '7'},
+ {'label': 'weekly', 'count': '5', 'prefix': 'weekly'}]
+ self.assertEqual(my_obj.create_snapmirror_policy_retention_obj_for_rest(rules), retention_obj)
+
+ # Test three rules, with a prefix & schedule
+ rules = [{'snapmirror_label': 'daily', 'keep': 7},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}]
+ retention_obj = [{'label': 'daily', 'count': '7'},
+ {'label': 'weekly', 'count': '5', 'prefix': 'weekly_sv'},
+ {'label': 'monthly', 'count': '12', 'prefix': 'monthly_sv', 'creation_schedule': {'name': 'monthly'}}]
+ self.assertEqual(my_obj.create_snapmirror_policy_retention_obj_for_rest(rules), retention_obj)
+
+ def test_identify_snapmirror_policy_rules_with_schedule(self):
+ ''' test identify_snapmirror_policy_rules_with_schedule '''
+ data = self.set_default_args(use_rest='Never')
+ set_module_args(data)
+ my_obj = my_module()
+
+ # Test no rules
+ self.assertEqual(my_obj.identify_snapmirror_policy_rules_with_schedule(), ([], []))
+
+ # Test one non-schedule rule identified
+ rules = [{'snapmirror_label': 'daily', 'keep': 7}]
+ schedule_rules = []
+ non_schedule_rules = [{'snapmirror_label': 'daily', 'keep': 7}]
+ self.assertEqual(my_obj.identify_snapmirror_policy_rules_with_schedule(rules), (schedule_rules, non_schedule_rules))
+
+ # Test one schedule and two non-schedule rules identified
+ rules = [{'snapmirror_label': 'daily', 'keep': 7},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}]
+ schedule_rules = [{'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}]
+ non_schedule_rules = [{'snapmirror_label': 'daily', 'keep': 7},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv'}]
+ self.assertEqual(my_obj.identify_snapmirror_policy_rules_with_schedule(rules), (schedule_rules, non_schedule_rules))
+
+ # Test three schedule & zero non-schedule rules identified
+ rules = [{'snapmirror_label': 'daily', 'keep': 7, 'schedule': 'daily'},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv', 'schedule': 'weekly'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}]
+ schedule_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'schedule': 'daily'},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv', 'schedule': 'weekly'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}]
+ non_schedule_rules = []
+ self.assertEqual(my_obj.identify_snapmirror_policy_rules_with_schedule(rules), (schedule_rules, non_schedule_rules))
+
+ def test_identify_new_snapmirror_policy_rules(self):
+ ''' test identify_new_snapmirror_policy_rules '''
+
+ # Test with no rules in parameters. new_rules should always be [].
+ data = self.set_default_args(use_rest='Never', with_rules=False)
+ set_module_args(data)
+ my_obj = my_module()
+
+ current = None
+ new_rules = []
+ self.assertEqual(my_obj.identify_new_snapmirror_policy_rules(current), new_rules)
+
+ current = {'snapmirror_label': ['daily'], 'keep': [7], 'prefix': [''], 'schedule': ['']}
+ new_rules = []
+ self.assertEqual(my_obj.identify_new_snapmirror_policy_rules(current), new_rules)
+
+ # Test with rules in parameters.
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+
+ # Test three new rules identified when no rules currently exist
+ current = None
+ new_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+ self.assertEqual(my_obj.identify_new_snapmirror_policy_rules(current), new_rules)
+
+ # Test two new rules identified and one rule already exists
+ current = {'snapmirror_label': ['daily'], 'keep': [7], 'prefix': [''], 'schedule': ['']}
+ new_rules = [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+ self.assertEqual(my_obj.identify_new_snapmirror_policy_rules(current), new_rules)
+
+ # Test one new rule identified and two rules already exist
+ current = {'snapmirror_label': ['daily', 'monthly'],
+ 'keep': [7, 12],
+ 'prefix': ['', 'monthly'],
+ 'schedule': ['', 'monthly']}
+ new_rules = [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}]
+ self.assertEqual(my_obj.identify_new_snapmirror_policy_rules(current), new_rules)
+
+ # Test no new rules identified as all rules already exist
+ current = {'snapmirror_label': ['daily', 'monthly', 'weekly'],
+ 'keep': [7, 12, 5],
+ 'prefix': ['', 'monthly', 'weekly'],
+ 'schedule': ['', 'monthly', 'weekly']}
+ new_rules = []
+ self.assertEqual(my_obj.identify_new_snapmirror_policy_rules(current), new_rules)
+
+ def test_identify_obsolete_snapmirror_policy_rules(self):
+ ''' test identify_obsolete_snapmirror_policy_rules '''
+
+ # Test with no rules in parameters. obsolete_rules should always be [].
+ data = self.set_default_args(use_rest='Never', with_rules=False)
+ set_module_args(data)
+ my_obj = my_module()
+
+ current = None
+ obsolete_rules = []
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ current = {'snapmirror_label': ['daily'], 'keep': [7], 'prefix': [''], 'schedule': ['']}
+ obsolete_rules = []
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ # Test removing all rules. obsolete_rules should equal current.
+ data = self.set_default_args(use_rest='Never', with_rules=False)
+ data['snapmirror_label'] = []
+ set_module_args(data)
+ my_obj = my_module()
+
+ current = {'snapmirror_label': ['monthly', 'weekly', 'hourly', 'daily', 'yearly'],
+ 'keep': [12, 5, 24, 7, 7],
+ 'prefix': ['monthly', 'weekly', '', '', 'yearly'],
+ 'schedule': ['monthly', 'weekly', '', '', 'yearly']}
+ obsolete_rules = [{'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+ {'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'yearly', 'keep': 7, 'prefix': 'yearly', 'schedule': 'yearly'}]
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ # Test with rules in parameters.
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+
+ # Test no rules exist, thus no obsolete rules
+ current = None
+ obsolete_rules = []
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ # Test new rules and one obsolete rule identified
+ current = {'snapmirror_label': ['hourly'], 'keep': [24], 'prefix': [''], 'schedule': ['']}
+ obsolete_rules = [{'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''}]
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ # Test new rules, with one retained and one obsolete rule identified
+ current = {'snapmirror_label': ['hourly', 'daily'],
+ 'keep': [24, 7],
+ 'prefix': ['', ''],
+ 'schedule': ['', '']}
+ obsolete_rules = [{'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''}]
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ # Test new rules and two obsolete rules identified
+ current = {'snapmirror_label': ['monthly', 'weekly', 'hourly', 'daily', 'yearly'],
+ 'keep': [12, 5, 24, 7, 7],
+ 'prefix': ['monthly', 'weekly', '', '', 'yearly'],
+ 'schedule': ['monthly', 'weekly', '', '', 'yearly']}
+ obsolete_rules = [{'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'yearly', 'keep': 7, 'prefix': 'yearly', 'schedule': 'yearly'}]
+ self.assertEqual(my_obj.identify_obsolete_snapmirror_policy_rules(current), obsolete_rules)
+
+ def test_identify_modified_snapmirror_policy_rules(self):
+ ''' test identify_modified_snapmirror_policy_rules '''
+
+ # Test with no rules in parameters. modified_rules & unmodified_rules should always be [].
+ data = self.set_default_args(use_rest='Never', with_rules=False)
+ data.pop('snapmirror_label', None)
+ set_module_args(data)
+ my_obj = my_module()
+
+ current = None
+ modified_rules, unmodified_rules = [], []
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ current = {'snapmirror_label': ['daily'], 'keep': [14], 'prefix': ['daily'], 'schedule': ['daily']}
+ modified_rules, unmodified_rules = [], []
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ # Test removing all rules. modified_rules & unmodified_rules should be [].
+ data = self.set_default_args(use_rest='Never', with_rules=False)
+ data['snapmirror_label'] = []
+ set_module_args(data)
+ my_obj = my_module()
+ current = {'snapmirror_label': ['monthly', 'weekly', 'hourly', 'daily', 'yearly'],
+ 'keep': [12, 5, 24, 7, 7],
+ 'prefix': ['monthly', 'weekly', '', '', 'yearly'],
+ 'schedule': ['monthly', 'weekly', '', '', 'yearly']}
+ modified_rules, unmodified_rules = [], []
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ # Test with rules in parameters.
+ data = self.set_default_args(use_rest='Never', with_rules=True)
+ set_module_args(data)
+ my_obj = my_module()
+
+ # Test no rules exist, thus no modified & unmodified rules
+ current = None
+ modified_rules, unmodified_rules = [], []
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ # Test new rules don't exist, thus no modified & unmodified rules
+ current = {'snapmirror_label': ['hourly'], 'keep': [24], 'prefix': [''], 'schedule': ['']}
+ modified_rules, unmodified_rules = [], []
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ # Test daily & monthly modified, weekly unmodified
+ current = {'snapmirror_label': ['hourly', 'daily', 'weekly', 'monthly'],
+ 'keep': [24, 14, 5, 6],
+ 'prefix': ['', 'daily', 'weekly', 'monthly'],
+ 'schedule': ['', 'daily', 'weekly', 'monthly']}
+ modified_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+ unmodified_rules = [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}]
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ # Test all rules modified
+ current = {'snapmirror_label': ['daily', 'weekly', 'monthly'],
+ 'keep': [14, 10, 6],
+ 'prefix': ['', '', ''],
+ 'schedule': ['daily', 'weekly', 'monthly']}
+ modified_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+ unmodified_rules = []
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
+
+ # Test all rules unmodified
+ current = {'snapmirror_label': ['daily', 'weekly', 'monthly'],
+ 'keep': [7, 5, 12],
+ 'prefix': ['', 'weekly', 'monthly'],
+ 'schedule': ['', 'weekly', 'monthly']}
+ modified_rules = []
+ unmodified_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+ {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+ {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+ self.assertEqual(my_obj.identify_modified_snapmirror_policy_rules(current), (modified_rules, unmodified_rules))
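Each test file in this patch repeats the same harness: set_module_args() serialises the module arguments into basic._ANSIBLE_ARGS, and patching exit_json/fail_json on basic.AnsibleModule converts module exits into exceptions the test can catch. A minimal, self-contained sketch of that pattern against a bare AnsibleModule (the argument_spec is illustrative, not taken from the collection):

    import json
    import pytest
    from unittest.mock import patch
    from ansible.module_utils import basic
    from ansible.module_utils._text import to_bytes


    class AnsibleFailJson(Exception):
        """Raised instead of exiting so the test can inspect the failure payload."""


    def fail_json(*args, **kwargs):  # replacement for AnsibleModule.fail_json
        kwargs['failed'] = True
        raise AnsibleFailJson(kwargs)


    def test_missing_required_argument():
        # serialise the (empty) argument set exactly as set_module_args() does above
        basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))
        with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
            with pytest.raises(AnsibleFailJson) as exc:
                basic.AnsibleModule(argument_spec=dict(hostname=dict(required=True)))
        assert 'hostname' in exc.value.args[0]['msg']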
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py
new file mode 100644
index 00000000..74c87355
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py
@@ -0,0 +1,227 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_snapshot '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot \
+ import NetAppOntapSnapshot as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'snapshot':
+ xml = self.build_snapshot_info()
+ elif self.type == 'snapshot_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_snapshot_info():
+ ''' build xml data for snapshot-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'snapshot-info': {'comment': 'new comment',
+ 'name': 'ansible',
+ 'snapmirror-label': 'label12'}}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.193.75.3'
+ username = 'admin'
+ password = 'netapp1!'
+ vserver = 'ansible'
+ volume = 'ansible'
+ snapshot = 'ansible'
+ comment = 'new comment'
+ snapmirror_label = 'label12'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ vserver = 'vserver'
+ volume = 'ansible'
+ snapshot = 'ansible'
+ comment = 'new comment'
+ snapmirror_label = 'label12'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'vserver': vserver,
+ 'volume': volume,
+ 'snapshot': snapshot,
+ 'comment': comment,
+ 'snapmirror_label': snapmirror_label
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_snapshot() for non-existent snapshot'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_snapshot() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_snapshot() for existing snapshot'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='snapshot')
+ assert my_obj.get_snapshot()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot.NetAppOntapSnapshot.create_snapshot')
+ def test_successful_create(self, create_snapshot):
+ ''' creating snapshot and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_snapshot.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot.NetAppOntapSnapshot.modify_snapshot')
+ def test_successful_modify(self, modify_snapshot):
+ ''' modifying snapshot and testing idempotency '''
+ data = self.set_default_args()
+ data['comment'] = 'adding comment'
+ data['snapmirror_label'] = 'label22'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ modify_snapshot.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ data['comment'] = 'new comment'
+ data['snapmirror_label'] = 'label12'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot.NetAppOntapSnapshot.delete_snapshot')
+ def test_successful_delete(self, delete_snapshot):
+ ''' deleting snapshot and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_snapshot.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot()
+ assert 'Error creating snapshot ansible:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_snapshot()
+ assert 'Error deleting snapshot ansible:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_snapshot()
+ assert 'Error modifying snapshot ansible:' in exc.value.args[0]['msg']
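The MockONTAPConnection classes in these files all follow one recipe: translate_struct() turns a plain dict into the nested NaElement tree a real ZAPI call would return, and invoke_successfully() simply hands that canned element back. A minimal sketch of the recipe, assuming netapp-lib is installed (the element names and record content are illustrative):

    import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils


    def build_canned_snapshot_info():
        ''' canned ZAPI reply: one record carrying a snapshot-info attribute '''
        xml = netapp_utils.zapi.NaElement('xml')
        xml.translate_struct({'num-records': 1,
                              'attributes-list': {'snapshot-info': {'name': 'ansible'}}})
        return xml


    class FakeZapiServer(object):
        ''' stands in for the real ZAPI connection object used by the module '''
        def invoke_successfully(self, na_element, enable_tunneling=False):
            return build_canned_snapshot_info()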
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py
new file mode 100644
index 00000000..57bd42dc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py
@@ -0,0 +1,691 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_snapshot_policy'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy \
+ import NetAppOntapSnapshotPolicy as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'policy':
+ xml = self.build_snapshot_policy_info()
+ elif self.type == 'snapshot_policy_info_policy_disabled':
+ xml = self.build_snapshot_policy_info_policy_disabled()
+ elif self.type == 'snapshot_policy_info_comment_modified':
+ xml = self.build_snapshot_policy_info_comment_modified()
+ elif self.type == 'snapshot_policy_info_schedules_added':
+ xml = self.build_snapshot_policy_info_schedules_added()
+ elif self.type == 'snapshot_policy_info_schedules_deleted':
+ xml = self.build_snapshot_policy_info_schedules_deleted()
+ elif self.type == 'snapshot_policy_info_modified_schedule_counts':
+ xml = self.build_snapshot_policy_info_modified_schedule_counts()
+ elif self.type == 'policy_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ def asup_log_for_cserver(self):
+ ''' mock autosupport log'''
+ return None
+
+ @staticmethod
+ def build_snapshot_policy_info():
+ ''' build xml data for snapshot-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {
+ 'snapshot-policy-info': {
+ 'comment': 'new comment',
+ 'enabled': 'true',
+ 'policy': 'ansible',
+ 'snapshot-policy-schedules': {
+ 'snapshot-schedule-info': {
+ 'count': 100,
+ 'schedule': 'hourly',
+ 'prefix': 'hourly',
+ 'snapmirror-label': ''
+ }
+ },
+ 'vserver-name': 'hostname'
+ }
+ }}
+ xml.translate_struct(data)
+ return xml
+
+ @staticmethod
+ def build_snapshot_policy_info_comment_modified():
+ ''' build xml data for snapshot-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {
+ 'snapshot-policy-info': {
+ 'comment': 'modified comment',
+ 'enabled': 'true',
+ 'policy': 'ansible',
+ 'snapshot-policy-schedules': {
+ 'snapshot-schedule-info': {
+ 'count': 100,
+ 'schedule': 'hourly',
+ 'prefix': 'hourly',
+ 'snapmirror-label': ''
+ }
+ },
+ 'vserver-name': 'hostname'
+ }
+ }}
+ xml.translate_struct(data)
+ return xml
+
+ @staticmethod
+ def build_snapshot_policy_info_policy_disabled():
+ ''' build xml data for snapshot-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {
+ 'snapshot-policy-info': {
+ 'comment': 'new comment',
+ 'enabled': 'false',
+ 'policy': 'ansible',
+ 'snapshot-policy-schedules': {
+ 'snapshot-schedule-info': {
+ 'count': 100,
+ 'schedule': 'hourly',
+ 'prefix': 'hourly',
+ 'snapmirror-label': ''
+ }
+ },
+ 'vserver-name': 'hostname'
+ }
+ }}
+ xml.translate_struct(data)
+ return xml
+
+ @staticmethod
+ def build_snapshot_policy_info_schedules_added():
+ ''' build xml data for snapshot-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {
+ 'snapshot-policy-info': {
+ 'comment': 'new comment',
+ 'enabled': 'true',
+ 'policy': 'ansible',
+ 'snapshot-policy-schedules': [
+ {
+ 'snapshot-schedule-info': {
+ 'count': 100,
+ 'schedule': 'hourly',
+ 'prefix': 'hourly',
+ 'snapmirror-label': ''
+ }
+ },
+ {
+ 'snapshot-schedule-info': {
+ 'count': 5,
+ 'schedule': 'daily',
+ 'prefix': 'daily',
+ 'snapmirror-label': 'daily'
+ }
+ },
+ {
+ 'snapshot-schedule-info': {
+ 'count': 10,
+ 'schedule': 'weekly',
+ 'prefix': 'weekly',
+ 'snapmirror-label': ''
+ }
+ }
+ ],
+ 'vserver-name': 'hostname'
+ }
+ }}
+ xml.translate_struct(data)
+ return xml
+
+ @staticmethod
+ def build_snapshot_policy_info_schedules_deleted():
+ ''' build xml data for snapshot-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {
+ 'snapshot-policy-info': {
+ 'comment': 'new comment',
+ 'enabled': 'true',
+ 'policy': 'ansible',
+ 'snapshot-policy-schedules': [
+ {
+ 'snapshot-schedule-info': {
+ 'schedule': 'daily',
+ 'prefix': 'daily',
+ 'count': 5,
+ 'snapmirror-label': 'daily'
+ }
+ }
+ ],
+ 'vserver-name': 'hostname'
+ }
+ }}
+ xml.translate_struct(data)
+ return xml
+
+ @staticmethod
+ def build_snapshot_policy_info_modified_schedule_counts():
+ ''' build xml data for snapshot-policy-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {
+ 'snapshot-policy-info': {
+ 'comment': 'new comment',
+ 'enabled': 'true',
+ 'policy': 'ansible',
+ 'snapshot-policy-schedules': [
+ {
+ 'snapshot-schedule-info': {
+ 'count': 10,
+ 'schedule': 'hourly',
+ 'prefix': 'hourly',
+ 'snapmirror-label': ''
+ }
+ },
+ {
+ 'snapshot-schedule-info': {
+ 'count': 50,
+ 'schedule': 'daily',
+ 'prefix': 'daily',
+ 'snapmirror-label': 'daily'
+ }
+ },
+ {
+ 'snapshot-schedule-info': {
+ 'count': 100,
+ 'schedule': 'weekly',
+ 'prefix': 'weekly',
+ 'snapmirror-label': ''
+ }
+ }
+ ],
+ 'vserver-name': 'hostname'
+ }
+ }}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = '1234'
+ name = 'ansible'
+ enabled = True
+ count = 100
+ schedule = 'hourly'
+ prefix = 'hourly'
+ comment = 'new comment'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ name = 'ansible'
+ enabled = True
+ count = 100
+ schedule = 'hourly'
+ prefix = 'hourly'
+ comment = 'new comment'
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'name': name,
+ 'enabled': enabled,
+ 'count': count,
+ 'schedule': schedule,
+ 'prefix': prefix,
+ 'comment': comment
+ })
+
+ def set_default_current(self):
+ default_args = self.set_default_args()
+ return dict({
+ 'name': default_args['name'],
+ 'enabled': default_args['enabled'],
+ 'count': [default_args['count']],
+ 'schedule': [default_args['schedule']],
+ 'snapmirror_label': [''],
+ 'prefix': [default_args['prefix']],
+ 'comment': default_args['comment'],
+ 'vserver': default_args['hostname']
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_snapshot_policy() for non-existent snapshot policy'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ assert my_obj.get_snapshot_policy() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_snapshot_policy() for existing snapshot policy'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = MockONTAPConnection(kind='policy')
+ assert my_obj.get_snapshot_policy()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.create_snapshot_policy')
+ def test_successful_create(self, create_snapshot):
+ ''' creating snapshot policy and testing idempotency '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_snapshot.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy')
+ def test_successful_modify_comment(self, modify_snapshot):
+ ''' modifying snapshot policy comment and testing idempotency '''
+ data = self.set_default_args()
+ data['comment'] = 'modified comment'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ current = self.set_default_current()
+ modify_snapshot.assert_called_with(current)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_policy_info_comment_modified')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy')
+ def test_successful_disable_policy(self, modify_snapshot):
+ ''' disabling snapshot policy and testing idempotency '''
+ data = self.set_default_args()
+ data['enabled'] = False
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ current = self.set_default_current()
+ modify_snapshot.assert_called_with(current)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_policy_info_policy_disabled')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy')
+ def test_successful_enable_policy(self, modify_snapshot):
+ ''' enabling snapshot policy and testing idempotency '''
+ data = self.set_default_args()
+ data['enabled'] = True
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_policy_info_policy_disabled')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ current = self.set_default_current()
+ current['enabled'] = False
+ modify_snapshot.assert_called_with(current)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy')
+ def test_successful_modify_schedules_add(self, modify_snapshot):
+ ''' adding snapshot policy schedules and testing idempotency '''
+ data = self.set_default_args()
+ data['schedule'] = ['hourly', 'daily', 'weekly']
+ data['prefix'] = ['hourly', 'daily', 'weekly']
+ data['count'] = [100, 5, 10]
+ data['snapmirror_label'] = ['', 'daily', '']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ current = self.set_default_current()
+ modify_snapshot.assert_called_with(current)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_policy_info_schedules_added')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy')
+ def test_successful_modify_schedules_delete(self, modify_snapshot):
+ ''' deleting snapshot policy schedules and testing idempotency '''
+ data = self.set_default_args()
+ data['schedule'] = ['daily']
+ data['prefix'] = ['daily']
+ data['count'] = [5]
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ current = self.set_default_current()
+ modify_snapshot.assert_called_with(current)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_policy_info_schedules_deleted')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy')
+ def test_successful_modify_schedules(self, modify_snapshot):
+ ''' modifying snapshot policy schedule counts and testing idempotency '''
+ data = self.set_default_args()
+ data['schedule'] = ['hourly', 'daily', 'weekly']
+ data['count'] = [10, 50, 100]
+ data['prefix'] = ['hourly', 'daily', 'weekly']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ current = self.set_default_current()
+ modify_snapshot.assert_called_with(current)
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('snapshot_policy_info_modified_schedule_counts')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.delete_snapshot_policy')
+ def test_successful_delete(self, delete_snapshot):
+ ''' deleting snapshot policy and testing idempotency '''
+ data = self.set_default_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ delete_snapshot.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_valid_schedule_count(self):
+ ''' validate when schedule has same number of elements '''
+ data = self.set_default_args()
+ data['schedule'] = ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ data['prefix'] = ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ data['count'] = [1, 2, 3, 4, 5]
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ my_obj.create_snapshot_policy()
+ create_xml = my_obj.server.xml_in
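+ # create_snapshot_policy() serializes each schedule into position-numbered ZAPI
+ # fields (count1..count5, schedule1..schedule5), so list index 2 maps to 'count3'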
+ assert data['count'][2] == int(create_xml['count3'])
+ assert data['schedule'][4] == create_xml['schedule5']
+
+ def test_valid_schedule_count_with_snapmirror_labels(self):
+ ''' validate when schedule has same number of elements with snapmirror labels '''
+ data = self.set_default_args()
+ data['schedule'] = ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ data['prefix'] = ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ data['count'] = [1, 2, 3, 4, 5]
+ data['snapmirror_label'] = ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ my_obj.create_snapshot_policy()
+ create_xml = my_obj.server.xml_in
+ assert data['count'][2] == int(create_xml['count3'])
+ assert data['schedule'][4] == create_xml['schedule5']
+ assert data['snapmirror_label'][3] == create_xml['snapmirror-label4']
+
+ def test_invalid_params(self):
+ ''' validate error when schedule does not have same number of elements '''
+ data = self.set_default_args()
+ data['schedule'] = ['s1', 's2']
+ data['prefix'] = ['s1', 's2']
+ data['count'] = [1, 2, 3]
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ msg = 'Error: A Snapshot policy must have at least 1 ' \
+ 'schedule and can have up to a maximum of 5 schedules, with a count ' \
+ 'representing the maximum number of Snapshot copies for each schedule'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_invalid_schedule_count(self):
+ ''' validate error when schedule has more than 5 elements '''
+ data = self.set_default_args()
+ data['schedule'] = ['s1', 's2', 's3', 's4', 's5', 's6']
+ data['count'] = [1, 2, 3, 4, 5, 6]
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ msg = 'Error: A Snapshot policy must have at least 1 ' \
+ 'schedule and can have up to a maximum of 5 schedules, with a count ' \
+ 'representing the maximum number of Snapshot copies for each schedule'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_invalid_schedule_count_less_than_one(self):
+ ''' validate error when schedule has less than 1 element '''
+ data = self.set_default_args()
+ data['schedule'] = []
+ data['count'] = []
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ msg = 'Error: A Snapshot policy must have at least 1 ' \
+ 'schedule and can have up to a maximum of 5 schedules, with a count ' \
+ 'representing the maximum number of Snapshot copies for each schedule'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_invalid_schedule_count_is_none(self):
+ ''' validate error when schedule is None '''
+ data = self.set_default_args()
+ data['schedule'] = None
+ data['count'] = None
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ msg = 'Error: A Snapshot policy must have at least 1 ' \
+ 'schedule and can have up to a maximum of 5 schedules, with a count ' \
+ 'representing the maximum number of Snapshot copies for each schedule'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_invalid_schedule_count_with_snapmirror_labels(self):
+ ''' validate error when schedule with snapmirror labels does not have same number of elements '''
+ data = self.set_default_args()
+ data['schedule'] = ['s1', 's2', 's3']
+ data['count'] = [1, 2, 3]
+ data['snapmirror_label'] = ['sm1', 'sm2']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ msg = 'Error: Each Snapshot Policy schedule must have an accompanying SnapMirror Label'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_invalid_schedule_count_with_prefixes(self):
+ ''' validate error when schedule with prefixes does not have same number of elements '''
+ data = self.set_default_args()
+ data['schedule'] = ['s1', 's2', 's3']
+ data['count'] = [1, 2, 3]
+ data['prefix'] = ['s1', 's2']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.asup_log_for_cserver = Mock(return_value=None)
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ msg = 'Error: Each Snapshot Policy schedule must have an accompanying prefix'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_if_all_methods_catch_exception(self):
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('policy_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_snapshot_policy()
+ assert 'Error creating snapshot policy ansible:' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_snapshot_policy()
+ assert 'Error deleting snapshot policy ansible:' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py
new file mode 100644
index 00000000..18092fe0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py
@@ -0,0 +1,153 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for Ansible module: na_ontap_snmp_traphosts """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snmp_traphosts \
+ import NetAppONTAPSnmpTraphosts as traphost_module # module under test
+
+# REST API canned responses when mocking send_request
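+# Each entry is a (status_code, json_body, error) tuple; tests queue them on the
+# patched send_request via side_effect so they are consumed in call order.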
+
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'no_record': (200, {"records": {}}, None),
+ 'get_snmp_traphosts': (
+ 200,
+ {"records": [{
+ "host": "0.0.0.0",
+ "ip_address": "0.0.0.0"
+ }]
+ }, None
+ )
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ """ Unit tests for na_ontap_wwpn_alias """
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_alias = {
+ 'ip_address': '0.0.0.0',
+ }
+
+ def mock_args(self):
+ return {
+ 'ip_address': self.mock_alias['ip_address'],
+ 'hostname': 'test_host',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_alias_mock_object(self):
+ alias_obj = traphost_module()
+ return alias_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ """Test successful rest create"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ """Test rest create idempotency"""
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_snmp_traphosts'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete(self, mock_request):
+ """Test successful rest delete"""
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_snmp_traphosts'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_delete_idempotency(self, mock_request):
+ """Test successful rest delete"""
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py
new file mode 100644
index 00000000..a771b276
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py
@@ -0,0 +1,190 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_software_update '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_software_update \
+ import NetAppONTAPSoftwareUpdate as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, parm2=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.parm2 = parm2
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
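+ # cluster-image-get requests always return canned image info; other requests
+ # return software-update info only when this mock was built with kind='software_update'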
+ self.xml_in = xml
+ print(xml.to_string())
+ if xml.to_string().startswith(b'<cluster-image-get><node-id>'):
+ xml = self.build_image_info()
+ elif self.type == 'software_update':
+ xml = self.build_software_update_info(self.parm1, self.parm2)
+ self.xml_out = xml
+ return xml
+
+ def autosupport_log(self):
+ ''' mock autosupport log'''
+ return None
+
+ @staticmethod
+ def build_image_info():
+ ''' build xml data for cluster-image-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {
+ 'attributes': {'cluster-image-info': {'node-id': 'node4test',
+ 'current-version': 'Fattire__9.3.0'}},
+ }
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_software_update_info(status, node):
+ ''' build xml data for software-update-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {
+ 'num-records': 1,
+ 'attributes-list': {'cluster-image-info': {'node-id': node}},
+ 'progress-status': status,
+ 'attributes': {'ndu-progress-info': {'overall-status': 'completed',
+ 'completed-node-count': '0'}},
+ }
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.use_vsim = False
+
+ def set_default_args(self):
+ if self.use_vsim:
+ hostname = '10.10.10.10'
+ username = 'admin'
+ password = 'admin'
+ node = 'vsim1'
+ package_version = 'Fattire__9.3.0'
+ package_url = 'abc.com'
+ stabilize_minutes = 10
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ node = 'abc'
+ package_version = 'Fattire__9.3.0'
+ package_url = 'abc.com'
+ stabilize_minutes = 10
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'nodes': node,
+ 'package_version': package_version,
+ 'package_url': package_url,
+ 'https': 'true',
+ 'stabilize_minutes': stabilize_minutes
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_image_get_called(self):
+ ''' validate cluster_image_get returns an empty list when no image info is reported '''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.server = self.server
+ cluster_image_get = my_obj.cluster_image_get()
+ print('Info: test_software_update_get: %s' % repr(cluster_image_get))
+ assert cluster_image_get == list()
+
+ def test_ensure_apply_for_update_called_idempotent(self):
+ ''' updating software and checking idempotency '''
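+ # the default package_version matches the mocked current-version (Fattire__9.3.0),
+ # so apply() is expected to report no change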
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_software_update_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ def test_ensure_apply_for_update_called(self):
+ ''' updating software to a new package version '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'package_version': 'PlinyTheElder'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.autosupport_log = Mock(return_value=None)
+ if not self.use_vsim:
+ my_obj.server = MockONTAPConnection('software_update', 'async_pkg_get_phase_complete', 'abc')
+ with pytest.raises(AnsibleExitJson) as exc:
+ # replace time.sleep with a noop
+ with patch('time.sleep', lambda a: None):
+ my_obj.apply()
+ print('Info: test_software_update_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py
new file mode 100644
index 00000000..9ea6785a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py
@@ -0,0 +1,430 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_svm \
+ import NetAppOntapSVM as svm_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'svm_record': (200,
+ {'records': [{"uuid": "09e9fd5e-8ebd-11e9-b162-005056b39fe7",
+ "name": "test_svm",
+ "subtype": "default",
+ "language": "c.utf_8",
+ "aggregates": [{"name": "aggr_1",
+ "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"},
+ {"name": "aggr_2",
+ "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"}],
+ "comment": "new comment",
+ "ipspace": {"name": "ansible_ipspace",
+ "uuid": "2b760d31-8dfd-11e9-b162-005056b39fe7"},
+ "snapshot_policy": {"uuid": "3b611707-8dfd-11e9-b162-005056b39fe7",
+ "name": "old_snapshot_policy"},
+ "nfs": {"enabled": True},
+ "cifs": {"enabled": False},
+ "iscsi": {"enabled": False},
+ "fcp": {"enabled": False},
+ "nvme": {"enabled": False}}]}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
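+ # the 'vserver' kind returns canned vserver data; otherwise the request is echoed back unchanged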
+ self.xml_in = xml
+ if self.type == 'vserver':
+ xml = self.build_vserver_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_info(vserver):
+ ''' build xml data for vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1, 'attributes-list': {'vserver-info': {
+ 'vserver-name': vserver['name'],
+ 'ipspace': vserver['ipspace'],
+ 'root-volume': vserver['root_volume'],
+ 'root-volume-aggregate': vserver['root_volume_aggregate'],
+ 'language': vserver['language'],
+ 'comment': vserver['comment'],
+ 'snapshot-policy': vserver['snapshot_policy'],
+ 'vserver-subtype': vserver['subtype'],
+ 'allowed-protocols': [{'protocol': 'nfs'}, {'protocol': 'cifs'}],
+ 'aggr-list': [{'aggr-name': 'aggr_1'}, {'aggr-name': 'aggr_2'}],
+ }}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_vserver = {
+ 'name': 'test_svm',
+ 'root_volume': 'ansible_vol',
+ 'root_volume_aggregate': 'ansible_aggr',
+ 'aggr_list': 'aggr_1,aggr_2',
+ 'ipspace': 'ansible_ipspace',
+ 'subtype': 'default',
+ 'language': 'c.utf_8',
+ 'snapshot_policy': 'old_snapshot_policy',
+ 'comment': 'new comment'
+ }
+
+ def mock_args(self, rest=False):
+ if rest:
+ return {'name': self.mock_vserver['name'],
+ 'aggr_list': self.mock_vserver['aggr_list'],
+ 'ipspace': self.mock_vserver['ipspace'],
+ 'comment': self.mock_vserver['comment'],
+ 'subtype': 'default',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'}
+ else:
+ return {
+ 'name': self.mock_vserver['name'],
+ 'root_volume': self.mock_vserver['root_volume'],
+ 'root_volume_aggregate': self.mock_vserver['root_volume_aggregate'],
+ 'aggr_list': self.mock_vserver['aggr_list'],
+ 'ipspace': self.mock_vserver['ipspace'],
+ 'comment': self.mock_vserver['comment'],
+ 'subtype': 'default',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_vserver_mock_object(self, kind=None, data=None, cx_type='zapi'):
+ """
+ Helper method to return an na_ontap_svm object
+ :param kind: passes this param to MockONTAPConnection()
+ :param data: passes this param to MockONTAPConnection()
+ :return: na_ontap_svm object
+ """
+ vserver_obj = svm_module()
+ if cx_type == 'zapi':
+ vserver_obj.asup_log_for_cserver = Mock(return_value=None)
+ vserver_obj.cluster = Mock()
+ vserver_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ vserver_obj.server = MockONTAPConnection()
+ else:
+ if data is None:
+ vserver_obj.server = MockONTAPConnection(kind='vserver', data=self.mock_vserver)
+ else:
+ vserver_obj.server = MockONTAPConnection(kind='vserver', data=data)
+ return vserver_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ svm_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_vserver(self):
+ ''' test that get_vserver() returns None when the vserver does not exist '''
+ data = self.mock_args()
+ set_module_args(data)
+ result = self.get_vserver_mock_object().get_vserver()
+ assert result is None
+
+ def test_create_error_missing_name(self):
+ ''' Test that module creation fails when name is not specified '''
+ data = self.mock_args()
+ del data['name']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_vserver_mock_object('vserver').create_vserver()
+ msg = 'missing required arguments: name'
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_svm.NetAppOntapSVM.create_vserver')
+ def test_successful_create(self, create_vserver):
+ '''Test successful create'''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ create_vserver.assert_called_with()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_svm.NetAppOntapSVM.create_vserver')
+ def test_create_idempotency(self, create_vserver):
+ '''Test create idempotency'''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object('vserver').apply()
+ assert not exc.value.args[0]['changed']
+ create_vserver.assert_not_called()
+
+ def test_successful_delete(self):
+ '''Test successful delete'''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object('vserver').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_svm.NetAppOntapSVM.delete_vserver')
+ def test_delete_idempotency(self, delete_vserver):
+ '''Test delete idempotency'''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+ delete_vserver.assert_not_called()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_svm.NetAppOntapSVM.get_vserver')
+ def test_successful_rename(self, get_vserver):
+ '''Test successful rename'''
+ data = self.mock_args()
+ data['from_name'] = 'test_svm'
+ data['name'] = 'test_new_svm'
+ set_module_args(data)
+ current = {
+ 'name': 'test_svm',
+ 'root_volume': 'ansible_vol',
+ 'root_volume_aggregate': 'ansible_aggr',
+ 'ipspace': 'ansible_ipspace',
+ 'subtype': 'default',
+ 'language': 'c.utf_8'
+ }
+ get_vserver.side_effect = [
+ None,
+ current
+ ]
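+ # the first lookup (new name) is expected to find nothing, the second (from_name)
+ # returns the existing vserver, so apply() performs a rename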
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_language(self):
+ '''Test successful modify language'''
+ data = self.mock_args()
+ data['language'] = 'c'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object('vserver').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_snapshot_policy(self):
+ '''Test successful modify snapshot policy'''
+ data = self.mock_args()
+ data['snapshot_policy'] = 'new_snapshot_policy'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object('vserver').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_allowed_protocols(self):
+ '''Test successful modify allowed protocols'''
+ data = self.mock_args()
+ data['allowed_protocols'] = 'protocol_1,protocol_2'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object('vserver').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_aggr_list(self):
+ '''Test successful modify aggr-list'''
+ data = self.mock_args()
+ data['aggr_list'] = 'aggr_3,aggr_4'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object('vserver').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error_unsupported_parm(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['use_rest'] = 'Always'
+ data['root_volume'] = 'not_supported_by_rest'
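+ # root_volume is a ZAPI-only option, so forcing use_rest must fail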
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == "REST API currently does not support 'root_volume'"
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_create(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['empty_good'], # post
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['state'] = 'present'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_record'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_delete(self, mock_request):
+ '''Test successful delete'''
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_record'], # get
+ SRR['empty_good'], # delete
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_delete_idempotency(self, mock_request):
+ '''Test delete idempotency'''
+ data = self.mock_args(rest=True)
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['empty_good'], # get
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_rename(self, mock_request):
+ '''Test successful rename'''
+ data = self.mock_args(rest=True)
+ data['from_name'] = 'test_svm'
+ data['name'] = 'test_new_svm'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_record'], # get
+ SRR['svm_record'], # get
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_modify_language(self, mock_request):
+ '''Test successful modify language'''
+ data = self.mock_args(rest=True)
+ data['language'] = 'c'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['svm_record'], # get
+ SRR['svm_record'], # get
+ SRR['empty_good'], # patch
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py
new file mode 100644
index 00000000..60648253
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py
@@ -0,0 +1,121 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cg_snapshot \
+ import NetAppONTAPCGSnapshot as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'vserver':
+ xml = self.build_vserver_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_info(vserver):
+ ''' build xml data for vserver-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes-list')
+ attributes.add_node_with_children('vserver-info',
+ **{'vserver-name': vserver})
+ xml.add_child_elem(attributes)
+ # print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_command_called(self):
+ ''' a more interesting test '''
+# TODO: change argument names/values
+ set_module_args({
+ 'vserver': 'vserver',
+ 'volumes': 'volumes',
+ 'snapshot': 'snapshot',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ my_obj = my_module()
+ my_obj.server = self.server
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # Hint: start with get methods, as they are called first
+ my_obj.apply()
+# TODO: change message, and maybe test contents
+ msg = 'Error fetching CG ID for CG commit snapshot'
+ assert exc.value.args[0]['msg'] == msg
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py
new file mode 100644
index 00000000..6b8459a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py
@@ -0,0 +1,176 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: na_ontap_ucadapter '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ucadapter \
+ import NetAppOntapadapter as ucadapter_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'ucadapter':
+ xml = self.build_ucadapter_info(self.parm1)
+ self.xml_out = xml
+ return xml
+
+ def autosupport_log(self):
+ ''' mock autosupport log'''
+ return None
+
+ @staticmethod
+ def build_ucadapter_info(params):
+ ''' build xml data for ucadapter_info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'attributes': {'uc-adapter-info': {
+ 'mode': 'fc',
+ 'pending-mode': 'abc',
+ 'type': 'target',
+ 'pending-type': 'initiator',
+ 'status': params['status'],
+ }}}
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.use_vsim = False
+ self.mock_ucadapter = {
+ 'mode': 'fc',
+ 'pending-mode': 'fc',
+ 'type': 'target',
+ 'pending-type': 'initiator',
+ 'status': 'up',
+ }
+
+ def set_default_args(self):
+ args = (dict({
+ 'hostname': '10.0.0.0',
+ 'username': 'user',
+ 'password': 'pass',
+ 'node_name': 'node1',
+ 'adapter_name': '0f',
+ 'mode': self.mock_ucadapter['mode'],
+ 'type': self.mock_ucadapter['type']
+ }))
+ return args
+
+ def get_ucadapter_mock_object(self, kind=None, data=None):
+ """
+ Helper method to return an na_ontap_ucadapter object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_ucadapter object
+ """
+ obj = ucadapter_module()
+ obj.autosupport_log = Mock(return_value=None)
+ params = self.mock_ucadapter
+ if data is not None:
+ for k, v in data.items():
+ params[k] = v
+ obj.server = MockONTAPConnection(kind=kind, data=params)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ ucadapter_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_ucadapter_get_called(self):
+ ''' fetching ucadapter details '''
+ set_module_args(self.set_default_args())
+ get_adapter = self.get_ucadapter_mock_object().get_adapter()
+ print('Info: test_ucadapter_get: %s' % repr(get_adapter))
+ assert get_adapter is None
+
+ def test_change_mode_from_cna_to_fc(self):
+ ''' configuring ucadapter and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ucadapter_mock_object().apply()
+ assert not exc.value.args[0]['changed']
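+ # with no adapter info mocked, apply() reports no change; mocking a 'cna' adapter
+ # below should report a change to the requested 'fc' mode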
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ucadapter_mock_object('ucadapter', {'mode': 'cna', 'pending-mode': 'cna'}).apply()
+ assert exc.value.args[0]['changed']
+
+ module_args['type'] = 'initiator'
+ set_module_args(module_args)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ucadapter_mock_object('ucadapter', {'mode': 'cna', 'pending-mode': 'cna'}).apply()
+ assert exc.value.args[0]['changed']
+
+ def test_change_mode_from_fc_to_cna(self):
+ module_args = self.set_default_args()
+ module_args['mode'] = 'cna'
+ del module_args['type']
+ set_module_args(module_args)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_ucadapter_mock_object('ucadapter').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py
new file mode 100644
index 00000000..016e951b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py
@@ -0,0 +1,289 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit test template for ONTAP Ansible module '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group \
+ import NetAppOntapUnixGroup as group_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'group':
+ xml = self.build_group_info(self.params)
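+ # the 'group-fail' kind makes every ZAPI call raise, exercising the module's error paths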
+ elif self.kind == 'group-fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_group_info(data):
+ ''' build xml data for unix-group-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = \
+ {'attributes-list': {'unix-group-info': {'group-name': data['name'],
+ 'group-id': data['id']}},
+ 'num-records': 1}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_group = {
+ 'name': 'test',
+ 'id': '11',
+ 'vserver': 'something',
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_group['name'],
+ 'id': self.mock_group['id'],
+ 'vserver': self.mock_group['vserver'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_group_mock_object(self, kind=None, data=None):
+ """
+ Helper method to return an na_ontap_unix_group object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_unix_group object
+ """
+ obj = group_module()
+ obj.autosupport_log = Mock(return_value=None)
+ if data is None:
+ data = self.mock_group
+ obj.server = MockONTAPConnection(kind=kind, data=data)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ group_module()
+
+ def test_get_nonexistent_group(self):
+ ''' Test if get_unix_group returns None for non-existent group '''
+ set_module_args(self.mock_args())
+ result = self.get_group_mock_object().get_unix_group()
+ assert result is None
+
+ def test_get_existing_group(self):
+ ''' Test if get_unix_group returns details for existing group '''
+ set_module_args(self.mock_args())
+ result = self.get_group_mock_object('group').get_unix_group()
+ assert result['name'] == self.mock_group['name']
+
+ def test_get_xml(self):
+ set_module_args(self.mock_args())
+ obj = self.get_group_mock_object('group')
+ result = obj.get_unix_group()
+ assert obj.server.xml_in['query']
+ assert obj.server.xml_in['query']['unix-group-info']
+ group_info = obj.server.xml_in['query']['unix-group-info']
+ assert group_info['group-name'] == self.mock_group['name']
+ assert group_info['vserver'] == self.mock_group['vserver']
+
+ def test_create_error_missing_params(self):
+ data = self.mock_args()
+ del data['id']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group').create_unix_group()
+ assert 'Error: Missing a required parameter for create: (id)' == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.create_unix_group')
+ def test_create_called(self, create_group):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_group_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ create_group.assert_called_with()
+
+ def test_create_xml(self):
+ '''Test create ZAPI element'''
+ set_module_args(self.mock_args())
+ create = self.get_group_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ create.apply()
+ mock_key = {
+ 'group-name': 'name',
+ 'group-id': 'id',
+ }
+ for key in ['group-name', 'group-id']:
+ assert create.server.xml_in[key] == self.mock_group[mock_key[key]]
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.modify_unix_group')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.delete_unix_group')
+ def test_delete_called(self, delete_group, modify_group):
+ ''' Test delete existing group '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_group_mock_object('group').apply()
+ assert exc.value.args[0]['changed']
+ delete_group.assert_called_with()
+ assert modify_group.call_count == 0
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.get_unix_group')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.modify_unix_group')
+ def test_modify_called(self, modify_group, get_group):
+ ''' Test that modify is called when the group id changes '''
+ data = self.mock_args()
+ data['id'] = 20
+ set_module_args(data)
+ get_group.return_value = {'id': 10}
+ obj = self.get_group_mock_object('group')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ get_group.assert_called_with()
+ modify_group.assert_called_with({'id': 20})
+
+ def test_modify_only_id(self):
+ ''' Test modify group id '''
+ set_module_args(self.mock_args())
+ modify = self.get_group_mock_object('group')
+ modify.modify_unix_group({'id': 123})
+ print(modify.server.xml_in.to_string())
+ assert modify.server.xml_in['group-id'] == '123'
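+ # the module must map 'id' to the ZAPI key 'group-id'; a raw 'id' element must not appear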
+ with pytest.raises(KeyError):
+ modify.server.xml_in['id']
+
+ def test_modify_xml(self):
+ ''' Test the ZAPI element built when modifying the group id '''
+ set_module_args(self.mock_args())
+ modify = self.get_group_mock_object('group')
+ modify.modify_unix_group({'id': 25})
+ assert modify.server.xml_in['group-name'] == self.mock_group['name']
+ assert modify.server.xml_in['group-id'] == '25'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.create_unix_group')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.delete_unix_group')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.modify_unix_group')
+ def test_do_nothing(self, modify, delete, create):
+        ''' changed is False and none of the operation methods are called '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ obj = self.get_group_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ create.assert_not_called()
+ delete.assert_not_called()
+ modify.assert_not_called()
+
+ def test_get_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group-fail').get_unix_group()
+ assert 'Error getting UNIX group' in exc.value.args[0]['msg']
+
+ def test_create_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group-fail').create_unix_group()
+ assert 'Error creating UNIX group' in exc.value.args[0]['msg']
+
+ def test_modify_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group-fail').modify_unix_group({'id': '123'})
+ assert 'Error modifying UNIX group' in exc.value.args[0]['msg']
+
+ def test_delete_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group-fail').delete_unix_group()
+ assert 'Error removing UNIX group' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.get_unix_group')
+ def test_add_user_exception(self, get_unix_group):
+ data = self.mock_args()
+ data['users'] = 'test_user'
+ set_module_args(data)
+ get_unix_group.side_effect = [
+ {'users': []}
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group-fail').modify_users_in_group()
+ print(exc.value.args[0]['msg'])
+ assert 'Error adding user' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group.NetAppOntapUnixGroup.get_unix_group')
+ def test_delete_user_exception(self, get_unix_group):
+ data = self.mock_args()
+ data['users'] = ''
+ set_module_args(data)
+ get_unix_group.side_effect = [
+ {'users': ['test_user']}
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_group_mock_object('group-fail').modify_users_in_group()
+ print(exc.value.args[0]['msg'])
+ assert 'Error deleting user' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_user.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_user.py
new file mode 100644
index 00000000..3b9e5bce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_user.py
@@ -0,0 +1,283 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_unix_user '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user \
+ import NetAppOntapUnixUser as user_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'user':
+ xml = self.build_user_info(self.params)
+ elif self.kind == 'user-fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_user_info(data):
+        ''' build xml data for unix-user-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = \
+ {'attributes-list': {'unix-user-info': {'user-id': data['id'],
+ 'group-id': data['group_id'], 'full-name': data['full_name']}},
+ 'num-records': 1}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.mock_user = {
+ 'name': 'test',
+ 'id': '11',
+ 'group_id': '12',
+ 'vserver': 'something',
+ 'full_name': 'Test User'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_user['name'],
+ 'group_id': self.mock_user['group_id'],
+ 'id': self.mock_user['id'],
+ 'vserver': self.mock_user['vserver'],
+ 'full_name': self.mock_user['full_name'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_user_mock_object(self, kind=None, data=None):
+ """
+ Helper method to return an na_ontap_unix_user object
+        :param kind: passes this param to MockONTAPConnection()
+        :param data: passes this data to MockONTAPConnection(); defaults to self.mock_user
+        :return: na_ontap_unix_user object
+ """
+ obj = user_module()
+ obj.autosupport_log = Mock(return_value=None)
+ if data is None:
+ data = self.mock_user
+ obj.server = MockONTAPConnection(kind=kind, data=data)
+ return obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ user_module()
+
+ def test_get_nonexistent_user(self):
+ ''' Test if get_unix_user returns None for non-existent user '''
+ set_module_args(self.mock_args())
+ result = self.get_user_mock_object().get_unix_user()
+ assert result is None
+
+ def test_get_existing_user(self):
+ ''' Test if get_unix_user returns details for existing user '''
+ set_module_args(self.mock_args())
+ result = self.get_user_mock_object('user').get_unix_user()
+ assert result['full_name'] == self.mock_user['full_name']
+
+ def test_get_xml(self):
+ set_module_args(self.mock_args())
+ obj = self.get_user_mock_object('user')
+ result = obj.get_unix_user()
+ assert obj.server.xml_in['query']
+ assert obj.server.xml_in['query']['unix-user-info']
+ user_info = obj.server.xml_in['query']['unix-user-info']
+ assert user_info['user-name'] == self.mock_user['name']
+ assert user_info['vserver'] == self.mock_user['vserver']
+
+ def test_create_error_missing_params(self):
+ data = self.mock_args()
+ del data['group_id']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_user_mock_object('user').create_unix_user()
+ assert 'Error: Missing one or more required parameters for create: (group_id, id)' == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user')
+ def test_create_called(self, create_user):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_user_mock_object().apply()
+ assert exc.value.args[0]['changed']
+ create_user.assert_called_with()
+
+ def test_create_xml(self):
+ '''Test create ZAPI element'''
+ set_module_args(self.mock_args())
+ create = self.get_user_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ create.apply()
+ mock_key = {
+ 'user-name': 'name',
+ 'group-id': 'group_id',
+ 'user-id': 'id',
+ 'full-name': 'full_name'
+ }
+ for key in ['user-name', 'user-id', 'group-id', 'full-name']:
+ assert create.server.xml_in[key] == self.mock_user[mock_key[key]]
+
+    def test_create_without_full_name(self):
+        '''Test create ZAPI element without full_name'''
+ data = self.mock_args()
+ del data['full_name']
+ set_module_args(data)
+ create = self.get_user_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ create.apply()
+ with pytest.raises(KeyError):
+ create.server.xml_in['full-name']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user')
+ def test_delete_called(self, delete_user, modify_user):
+ ''' Test delete existing user '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_user_mock_object('user').apply()
+ assert exc.value.args[0]['changed']
+ delete_user.assert_called_with()
+ assert modify_user.call_count == 0
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.get_unix_user')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
+ def test_modify_called(self, modify_user, get_user):
+ ''' Test modify user group_id '''
+ data = self.mock_args()
+ data['group_id'] = 20
+ set_module_args(data)
+ get_user.return_value = {'group_id': 10}
+ obj = self.get_user_mock_object('user')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ get_user.assert_called_with()
+ modify_user.assert_called_with({'group_id': 20})
+
+ def test_modify_only_id(self):
+ ''' Test modify user id '''
+ set_module_args(self.mock_args())
+ modify = self.get_user_mock_object('user')
+ modify.modify_unix_user({'id': 123})
+ assert modify.server.xml_in['user-id'] == '123'
+ with pytest.raises(KeyError):
+ modify.server.xml_in['group-id']
+ with pytest.raises(KeyError):
+ modify.server.xml_in['full-name']
+
+ def test_modify_xml(self):
+ ''' Test modify user full_name '''
+ set_module_args(self.mock_args())
+ modify = self.get_user_mock_object('user')
+ modify.modify_unix_user({'full_name': 'New Name',
+ 'group_id': '25'})
+ assert modify.server.xml_in['user-name'] == self.mock_user['name']
+ assert modify.server.xml_in['full-name'] == 'New Name'
+ assert modify.server.xml_in['group-id'] == '25'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
+ def test_do_nothing(self, modify, delete, create):
+        ''' changed is False and none of the operation methods are called '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ obj = self.get_user_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ create.assert_not_called()
+ delete.assert_not_called()
+ modify.assert_not_called()
+
+ def test_get_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_user_mock_object('user-fail').get_unix_user()
+ assert 'Error getting UNIX user' in exc.value.args[0]['msg']
+
+ def test_create_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_user_mock_object('user-fail').create_unix_user()
+ assert 'Error creating UNIX user' in exc.value.args[0]['msg']
+
+ def test_modify_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_user_mock_object('user-fail').modify_unix_user({'id': '123'})
+ assert 'Error modifying UNIX user' in exc.value.args[0]['msg']
+
+ def test_delete_exception(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_user_mock_object('user-fail').delete_unix_user()
+ assert 'Error removing UNIX user' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py
new file mode 100644
index 00000000..a43ac6f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py
@@ -0,0 +1,505 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_user '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_user \
+ import NetAppOntapUser as my_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
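+# each entry is an (http status, json body, error message) tuple fed to the mocked send_request via side_effect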
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
+ 'generic_error': (400, None, "Expected error"),
+ 'get_uuid': (200, {'owner': {'uuid': 'ansible'}}, None),
+ 'get_user_rest': (200,
+ {'num_records': 1,
+ 'records': [{'owner': {'uuid': 'ansible_vserver'},
+ 'name': 'abcd'}]}, None),
+ 'get_user_details_rest': (200,
+ {'role': {'name': 'vsadmin'},
+ 'applications': [{'application': 'http'}],
+ 'locked': False}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+def set_default_args_rest():
+ return dict({
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'user_name',
+ 'vserver': 'vserver',
+ 'applications': 'http',
+ 'authentication_method': 'password',
+ 'role_name': 'vsadmin',
+ 'lock_user': 'True',
+ })
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, parm1=None, parm2=None):
+ ''' save arguments '''
+ self.type = kind
+ self.parm1 = parm1
+ self.parm2 = parm2
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'user':
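+            # parm1 supplies is-locked, parm2 supplies role-name for the canned user record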
+ xml = self.build_user_info(self.parm1, self.parm2)
+ elif self.type == 'user_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def set_vserver(vserver):
+ '''mock set vserver'''
+
+ @staticmethod
+ def build_user_info(locked, role_name):
+ ''' build xml data for user-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'num-records': 1,
+ 'attributes-list': {'security-login-account-info': {'is-locked': locked, 'role-name': role_name}}}
+
+ xml.translate_struct(data)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.server = MockONTAPConnection()
+ self.onbox = False
+
+ def set_default_args(self, rest=False):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'username'
+ password = 'password'
+ user_name = 'test'
+ vserver = 'ansible_test'
+ application = 'console'
+ authentication_method = 'password'
+ else:
+ hostname = 'hostname'
+ username = 'username'
+ password = 'password'
+ user_name = 'name'
+ vserver = 'vserver'
+ application = 'console'
+ authentication_method = 'password'
+ if rest:
+ use_rest = 'auto'
+ else:
+ use_rest = 'never'
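+        # use_rest='never' keeps these tests on the ZAPI path; 'auto' allows REST when the backend supports it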
+
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'use_rest': use_rest,
+ 'name': user_name,
+ 'vserver': vserver,
+ 'applications': application,
+ 'authentication_method': authentication_method
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_user_get_called(self):
+ ''' a more interesting test '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'role_name': 'test'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ my_obj.server = self.server
+ user_info = my_obj.get_user()
+ print('Info: test_user_get: %s' % repr(user_info))
+ assert user_info is None
+
+ def test_ensure_user_apply_called(self):
+ ''' creating user and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = self.server
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'false')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_user_apply_for_delete_called(self):
+ ''' deleting user and checking idempotency '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'false', 'test')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'state': 'absent'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'false', 'test')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_delete: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_user_lock_called(self):
+ ''' changing user_lock to True and checking idempotency'''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test'})
+ module_args.update({'lock_user': 'false'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'false', 'test')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'lock_user': 'true'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'false')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_lock: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_user_unlock_called(self):
+ ''' changing user_lock to False and checking idempotency'''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test'})
+ module_args.update({'lock_user': 'false'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'false', 'test')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'lock_user': 'false'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'true', 'test')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_unlock: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_user_set_password_called(self):
+ ''' set password '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test'})
+ module_args.update({'set_password': '123456'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'true')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_user_role_update_called(self):
+        ''' update user role '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test123'})
+ module_args.update({'set_password': '123456'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'true')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_ensure_user_role_update_additional_application_called(self):
+        ''' update role and add an application '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'name': 'create'})
+ module_args.update({'role_name': 'test123'})
+ module_args.update({'application': 'http'})
+ module_args.update({'set_password': '123456'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user', 'true')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_user_apply: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ data = self.set_default_args()
+ data.update({'role_name': 'test'})
+ set_module_args(data)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.server = MockONTAPConnection('user_fail')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_user()
+ assert 'Error getting user ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_user(data['applications'])
+ assert 'Error creating user ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.lock_given_user()
+ assert 'Error locking user ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.unlock_given_user()
+ assert 'Error unlocking user ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.delete_user(data['applications'])
+ assert 'Error removing user ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.change_password()
+ assert 'Error setting password for user ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.modify_user(data['applications'])
+ assert 'Error modifying user ' in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error_applications_snmp(self, mock_request):
+ data = self.set_default_args(rest=True)
+ data.update({'applications': 'snmp'})
+ data.update({'name': 'create'})
+ data.update({'role_name': 'test123'})
+ data.update({'set_password': '123456'})
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ assert exc.value.args[0]['msg'] == "Snmp as application is not supported in REST."
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_user_get_rest_called(mock_request, mock_fail):
+ mock_fail.side_effect = fail_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_user_rest'],
+ SRR['end_of_sequence']
+ ]
+ set_module_args(set_default_args_rest())
+ my_obj = my_module()
+ assert my_obj.get_user_rest() is not None
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_create_user_rest_called(mock_request, mock_fail, mock_exit):
+ mock_fail.side_effect = fail_json
+ mock_exit.side_effect = exit_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_user_rest'],
+ SRR['get_user_details_rest'],
+ SRR['get_user_rest'],
+ ]
+ set_module_args(set_default_args_rest())
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_delete_user_rest_called(mock_request, mock_fail, mock_exit):
+ mock_fail.side_effect = fail_json
+ mock_exit.side_effect = exit_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_user_rest'],
+ SRR['get_user_details_rest'],
+ SRR['get_user_rest'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'state': 'absent',
+ }
+ data.update(set_default_args_rest())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_modify_user_rest_called(mock_request, mock_fail, mock_exit):
+ mock_fail.side_effect = fail_json
+ mock_exit.side_effect = exit_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_user_rest'],
+ SRR['get_user_details_rest'],
+ SRR['get_user_rest'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'application': 'ssh',
+ }
+ data.update(set_default_args_rest())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_lock_unlock_user_rest_called(mock_request, mock_fail, mock_exit):
+ mock_fail.side_effect = fail_json
+ mock_exit.side_effect = exit_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_user_rest'],
+ SRR['get_user_details_rest'],
+ SRR['get_user_rest'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'lock_user': 'newvalue',
+ }
+ data.update(set_default_args_rest())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+def test_ensure_change_password_user_rest_called(mock_request, mock_fail, mock_exit):
+ mock_fail.side_effect = fail_json
+ mock_exit.side_effect = exit_json
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_user_rest'],
+ SRR['get_user_details_rest'],
+ SRR['get_user_rest'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ data = {
+ 'password': 'newvalue',
+ }
+ data.update(set_default_args_rest())
+ set_module_args(data)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py
new file mode 100644
index 00000000..f8b3ce82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py
@@ -0,0 +1,239 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_user_role '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role \
+ import NetAppOntapUserRole as role_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'role':
+ xml = self.build_role_info(self.params)
+ if self.kind == 'error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+    def build_role_info(role_details):  # argument is accepted but unused; the role data below is canned
+ ''' build xml data for role-attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'security-login-role-info': {
+ 'access-level': 'all',
+ 'command-directory-name': 'volume',
+ 'role-name': 'testrole',
+ 'role-query': 'show',
+ 'vserver': 'ansible'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_role = {
+ 'name': 'testrole',
+ 'access_level': 'all',
+ 'command_directory_name': 'volume',
+ 'vserver': 'ansible'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': self.mock_role['name'],
+ 'vserver': self.mock_role['vserver'],
+ 'command_directory_name': self.mock_role['command_directory_name'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+
+ def get_role_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_user_role object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_user_role object
+ """
+ role_obj = role_module()
+ role_obj.asup_log_for_cserver = Mock(return_value=None)
+ role_obj.cluster = Mock()
+ role_obj.cluster.invoke_successfully = Mock()
+ if kind is None:
+ role_obj.server = MockONTAPConnection()
+ else:
+ role_obj.server = MockONTAPConnection(kind=kind, data=self.mock_role)
+ return role_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ role_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+    def test_get_nonexistent_role(self):
+ ''' Test if get_role returns None for non-existent role '''
+ set_module_args(self.mock_args())
+ result = self.get_role_mock_object().get_role()
+ assert result is None
+
+ def test_get_existing_role(self):
+ ''' Test if get_role returns details for existing role '''
+ set_module_args(self.mock_args())
+ result = self.get_role_mock_object('role').get_role()
+ assert result['name'] == self.mock_role['name']
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_role_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ data = self.mock_args()
+ data['query'] = 'show'
+ set_module_args(data)
+ obj = self.get_role_mock_object('role')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role.NetAppOntapUserRole.get_role')
+ def test_create_error(self, get_role):
+ ''' Test create error '''
+ set_module_args(self.mock_args())
+ get_role.side_effect = [
+ None
+ ]
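+        # with no existing role, apply() attempts a create and the 'error' connection raises the ZAPI error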
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_role_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error creating role testrole: NetApp API failed. Reason - test:error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role.NetAppOntapUserRole.get_role')
+ def test_successful_modify(self, get_role):
+ ''' Test successful modify '''
+ data = self.mock_args()
+ data['query'] = 'show'
+ set_module_args(data)
+ current = self.mock_role
+ current['query'] = 'show-space'
+ get_role.side_effect = [
+ current
+ ]
+ obj = self.get_role_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role.NetAppOntapUserRole.get_role')
+ def test_modify_idempotency(self, get_role):
+ ''' Test modify idempotency '''
+ data = self.mock_args()
+ data['query'] = 'show'
+ set_module_args(data)
+ current = self.mock_role
+ current['query'] = 'show'
+ get_role.side_effect = [
+ current
+ ]
+ obj = self.get_role_mock_object()
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role.NetAppOntapUserRole.get_role')
+ def test_modify_error(self, get_role):
+ ''' Test modify error '''
+ data = self.mock_args()
+ data['query'] = 'show'
+ set_module_args(data)
+ current = self.mock_role
+ current['query'] = 'show-space'
+ get_role.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_role_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying role testrole: NetApp API failed. Reason - test:error'
+
+ def test_successful_delete(self):
+ ''' Test delete existing role '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_role_mock_object('role').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py
new file mode 100644
index 00000000..48c34856
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py
@@ -0,0 +1,1183 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_volume '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume \
+ import NetAppOntapVolume as vol_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None, job_error=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+ self.job_error = job_error
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ # print("request: ", xml.to_string())
+ request = xml.to_string().decode('utf-8')
+ print(request)
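+        # efficiency (sis-get-iter) queries always get the canned sis info, regardless of self.kind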
+ if request.startswith('<sis-get-iter>'):
+ return self.build_sis_info()
+ if isinstance(self.kind, list):
+ kind = self.kind.pop(0)
+ if len(self.kind) == 0:
+ # loop over last element
+ self.kind = kind
+ else:
+ kind = self.kind
+
+ if kind == 'volume':
+ xml = self.build_volume_info(self.params)
+ elif kind == 'job_info':
+ xml = self.build_job_info(self.job_error)
+ elif kind == 'error_modify':
+ xml = self.build_modify_error()
+ elif kind == 'failure_modify_async':
+ xml = self.build_failure_modify_async()
+ elif kind == 'missing_element_modify_async':
+ xml = self.build_missing_element_modify_async()
+ elif kind == 'success_modify_async':
+ xml = self.build_success_modify_async()
+ elif kind == 'zapi_error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ # print(xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_volume_info(vol_details):
+ ''' build xml data for volume-attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'volume-attributes': {
+ 'volume-id-attributes': {
+ 'containing-aggregate-name': vol_details['aggregate'],
+ 'junction-path': vol_details['junction_path'],
+ 'style-extended': 'flexvol'
+ },
+ 'volume-language-attributes': {
+ 'language-code': 'en'
+ },
+ 'volume-export-attributes': {
+ 'policy': 'default'
+ },
+ 'volume-performance-attributes': {
+ 'is-atime-update-enabled': 'true'
+ },
+ 'volume-state-attributes': {
+ 'state': "online",
+ 'is-nvfail-enabled': 'true'
+ },
+ 'volume-space-attributes': {
+ 'space-guarantee': 'none',
+ 'size': vol_details['size'],
+ 'percentage-snapshot-reserve': vol_details['percent_snapshot_space'],
+ 'space-slo': 'thick'
+ },
+ 'volume-snapshot-attributes': {
+ 'snapshot-policy': vol_details['snapshot_policy']
+ },
+ 'volume-comp-aggr-attributes': {
+ 'tiering-policy': 'snapshot-only'
+ },
+ 'volume-security-attributes': {
+ 'style': 'unix',
+ 'volume-security-unix-attributes': {
+ 'permissions': vol_details['unix_permissions'],
+ 'group-id': vol_details['group_id'],
+ 'user-id': vol_details['user_id']
+ }
+ },
+ 'volume-vserver-dr-protection-attributes': {
+ 'vserver-dr-protection': vol_details['vserver_dr_protection'],
+ },
+ 'volume-qos-attributes': {
+ 'policy-group-name': vol_details['qos_policy_group'],
+ 'adaptive-policy-group-name': vol_details['qos_adaptive_policy_group']
+ },
+ 'volume-snapshot-autodelete-attributes': {
+ 'commitment': 'try'
+ }
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_flex_group_info(vol_details):
+ ''' build xml data for flexGroup volume-attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'volume-attributes': {
+ 'volume-id-attributes': {
+ 'aggr-list': vol_details['aggregate'],
+ 'junction-path': vol_details['junction_path'],
+ 'style-extended': 'flexgroup'
+ },
+ 'volume-language-attributes': {
+ 'language-code': 'en'
+ },
+ 'volume-export-attributes': {
+ 'policy': 'default'
+ },
+ 'volume-performance-attributes': {
+ 'is-atime-update-enabled': 'true'
+ },
+ 'volume-state-attributes': {
+ 'state': "online"
+ },
+ 'volume-space-attributes': {
+ 'space-guarantee': 'none',
+ 'size': vol_details['size']
+ },
+ 'volume-snapshot-attributes': {
+ 'snapshot-policy': vol_details['snapshot_policy']
+ },
+ 'volume-security-attributes': {
+ 'volume-security-unix-attributes': {
+ 'permissions': vol_details['unix_permissions']
+ }
+ }
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+ @staticmethod
+ def build_job_info(error):
+ ''' build xml data for a job '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('attributes')
+ if error is None:
+ state = 'success'
+ elif error == 'time_out':
+ state = 'running'
+ elif error == 'failure':
+ state = 'failure'
+ else:
+ state = 'other'
+ attributes.add_node_with_children('job-info', **{
+ 'job-state': state,
+ 'job-progress': 'dummy',
+ 'job-completion': error,
+ })
+ xml.add_child_elem(attributes)
+ xml.add_new_child('result-status', 'in_progress')
+ xml.add_new_child('result-jobid', '1234')
+ return xml
+
+ @staticmethod
+ def build_modify_error():
+ ''' build xml data for modify error '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('failure-list')
+ info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-info')
+ info_list_obj.add_new_child('error-message', 'modify error message')
+ attributes.add_child_elem(info_list_obj)
+ xml.add_child_elem(attributes)
+ return xml
+
+ @staticmethod
+ def build_success_modify_async():
+ ''' build xml data for success modify async '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('success-list')
+ info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-async-info')
+ info_list_obj.add_new_child('status', 'in_progress')
+ info_list_obj.add_new_child('jobid', '1234')
+ attributes.add_child_elem(info_list_obj)
+ xml.add_child_elem(attributes)
+ return xml
+
+ @staticmethod
+ def build_missing_element_modify_async():
+        ''' build xml data for a modify async response missing the async element '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('success-list')
+ info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-info') # no async!
+ info_list_obj.add_new_child('status', 'in_progress')
+ info_list_obj.add_new_child('jobid', '1234')
+ attributes.add_child_elem(info_list_obj)
+ xml.add_child_elem(attributes)
+ return xml
+
+ @staticmethod
+ def build_failure_modify_async():
+ ''' build xml data for failure modify async '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = netapp_utils.zapi.NaElement('failure-list')
+ info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-async-info')
+ info_list_obj.add_new_child('status', 'failed')
+ info_list_obj.add_new_child('jobid', '1234')
+ info_list_obj.add_new_child('error-message', 'modify error message')
+ attributes.add_child_elem(info_list_obj)
+ xml.add_child_elem(attributes)
+ return xml
+
+ @staticmethod
+ def build_sis_info():
+ ''' build xml data for sis config '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'sis-status-info': {
+ 'policy': 'testme'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_vol = {
+ 'name': 'test_vol',
+ 'aggregate': 'test_aggr',
+ 'junction_path': '/test',
+ 'vserver': 'test_vserver',
+ 'size': 20971520,
+ 'unix_permissions': '755',
+ 'user_id': 100,
+ 'group_id': 1000,
+ 'snapshot_policy': 'default',
+ 'qos_policy_group': 'performance',
+ 'qos_adaptive_policy_group': 'performance',
+ 'percent_snapshot_space': 60,
+ 'language': 'en',
+ 'vserver_dr_protection': 'unprotected'
+ }
+
+ def mock_args(self, tag=None):
+ args = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'policy': 'default',
+ 'language': self.mock_vol['language'],
+ 'is_online': True,
+ 'unix_permissions': '---rwxr-xr-x',
+ 'user_id': 100,
+ 'group_id': 1000,
+ 'snapshot_policy': 'default',
+ 'qos_policy_group': 'performance',
+ 'qos_adaptive_policy_group': 'performance',
+ 'size': 20,
+ 'size_unit': 'mb',
+ 'junction_path': '/test',
+ 'percent_snapshot_space': 60,
+ 'type': 'type',
+ 'nvfail_enabled': True,
+ 'space_slo': 'thick'
+ }
+ if tag is None:
+ args['aggregate_name'] = self.mock_vol['aggregate']
+ return args
+
+ elif tag == 'flexGroup_manual':
+ args['aggr_list'] = 'aggr_0,aggr_1'
+ args['aggr_list_multiplier'] = 2
+ return args
+
+ elif tag == 'flexGroup_auto':
+ args['auto_provision_as'] = 'flexgroup'
+ return args
+
+ def get_volume_mock_object(self, kind=None, job_error=None):
+ """
+ Helper method to return an na_ontap_volume object
+ :param kind: passes this param to MockONTAPConnection().
+ :param job_error: error message when getting job status.
+ :return: na_ontap_volume object
+ """
+ vol_obj = vol_module()
+ vol_obj.ems_log_event = Mock(return_value=None)
+ vol_obj.get_efficiency_policy = Mock(return_value='test_efficiency')
+ vol_obj.volume_style = None
+ if kind is None:
+ vol_obj.server = MockONTAPConnection()
+ elif kind == 'job_info':
+ vol_obj.server = MockONTAPConnection(kind='job_info', data=self.mock_vol, job_error=job_error)
+ vol_obj.cluster = MockONTAPConnection(kind='job_info', data=self.mock_vol, job_error=job_error)
+ else:
+ vol_obj.server = MockONTAPConnection(kind=kind, data=self.mock_vol)
+ vol_obj.cluster = MockONTAPConnection(kind=kind, data=self.mock_vol)
+
+ return vol_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ vol_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_volume(self):
+ ''' Test if get_volume returns None for non-existent volume '''
+ set_module_args(self.mock_args())
+ result = self.get_volume_mock_object().get_volume()
+ assert result is None
+
+ def test_get_existing_volume(self):
+ ''' Test if get_volume returns details for existing volume '''
+ set_module_args(self.mock_args())
+ result = self.get_volume_mock_object('volume').get_volume()
+ assert result['name'] == self.mock_vol['name']
+ assert result['size'] == self.mock_vol['size']
+
+ def test_create_error_missing_param(self):
+ ''' Test if create throws an error if aggregate_name is not specified'''
+ data = self.mock_args()
+ del data['aggregate_name']
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object('volume').create_volume()
+ msg = 'Error provisioning volume test_vol: aggregate_name is required'
+ assert exc.value.args[0]['msg'] == msg
+
+ def test_successful_create(self):
+ ''' Test successful create '''
+ data = self.mock_args()
+ data['size'] = 20
+ data['encrypt'] = True
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ ''' Test create idempotency '''
+ set_module_args(self.mock_args())
+ obj = self.get_volume_mock_object('volume')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_delete(self):
+ ''' Test delete existing volume '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ ''' Test delete idempotency '''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify_size(self):
+ ''' Test successful modify size '''
+ data = self.mock_args()
+ data['size'] = 200
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_modify_idempotency(self):
+ ''' Test modify idempotency '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_modify_error(self):
+        ''' Test modify error '''
+ data = self.mock_args()
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object('error_modify').volume_modify_attributes(dict())
+ assert exc.value.args[0]['msg'] == 'Error modifying volume test_vol: modify error message'
+
+ def test_mount_volume(self):
+ ''' Test mount volume '''
+ data = self.mock_args()
+ data['junction_path'] = "/test123"
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_unmount_volume(self):
+ ''' Test unmount volume '''
+ data = self.mock_args()
+ data['junction_path'] = ""
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_space(self):
+ ''' Test successful modify space '''
+ data = self.mock_args()
+ del data['space_slo']
+ data['space_guarantee'] = 'volume'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_unix_permissions(self):
+ ''' Test successful modify unix_permissions '''
+ data = self.mock_args()
+ data['unix_permissions'] = '---rw-r-xr-x'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_snapshot_policy(self):
+ ''' Test successful modify snapshot_policy '''
+ data = self.mock_args()
+ data['snapshot_policy'] = 'default-1weekly'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_efficiency_policy(self):
+ ''' Test successful modify efficiency_policy '''
+ data = self.mock_args()
+ data['efficiency_policy'] = 'test'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_percent_snapshot_space(self):
+ ''' Test successful modify percent_snapshot_space '''
+ data = self.mock_args()
+ data['percent_snapshot_space'] = '90'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_qos_policy_group(self):
+ ''' Test successful modify qos_policy_group '''
+ data = self.mock_args()
+ data['qos_policy_group'] = 'extreme'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_qos_adaptive_policy_group(self):
+ ''' Test successful modify qos_adaptive_policy_group '''
+ data = self.mock_args()
+ data['qos_adaptive_policy_group'] = 'extreme'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_move(self):
+ ''' Test successful modify aggregate '''
+ data = self.mock_args()
+ data['aggregate_name'] = 'different_aggr'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_rename(self, get_volume):
+ ''' Test successful rename volume '''
+ data = self.mock_args()
+ data['from_name'] = self.mock_vol['name']
+ data['name'] = 'new_name'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ }
+ get_volume.side_effect = [
+ None,
+ current
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_rename_async(self, get_volume):
+ ''' Test successful rename volume (async, infinite volume) '''
+ data = self.mock_args()
+ data['from_name'] = self.mock_vol['name']
+ data['name'] = 'new_name'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'is_infinite': True
+ }
+ get_volume.side_effect = [
+ None,
+ current
+ ]
+ obj = self.get_volume_mock_object('job_info')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.change_volume_state')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.volume_mount')
+ def test_modify_helper(self, mount_volume, change_state):
+ data = self.mock_args()
+ set_module_args(data)
+ modify = {
+ 'is_online': False,
+ 'junction_path': 'something'
+ }
+ obj = self.get_volume_mock_object('volume')
+ obj.modify_volume(modify)
+ change_state.assert_called_with()
+ mount_volume.assert_called_with()
+
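+ # The compare_chmod_value tests below cover comparing symbolic and octal unix_permissions
+ # (e.g. '---rwxrwxrwx' vs '777', '------------' vs '0'), including invalid symbolic strings.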
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_true_1(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '------------'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '0'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_true_2(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---rwxrwxrwx'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '777'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_true_3(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---rwxr-xr-x'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '755'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_true_4(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '755'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '755'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_false_1(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---rwxrwxrwx'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '0'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert not obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_false_2(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---rwxrwxrwx'
+ set_module_args(data)
+ current = None
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert not obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_invalid_input_1(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---xwrxwrxwr'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '777'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert not obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_invalid_input_2(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---rwx-wx--a'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '0'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert not obj.compare_chmod_value(current)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_compare_chmod_value_invalid_input_3(self, get_volume):
+ data = self.mock_args()
+ data['unix_permissions'] = '---'
+ set_module_args(data)
+ current = {
+ 'unix_permissions': '0'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object()
+ assert not obj.compare_chmod_value(current)
+
+ def test_successful_create_flex_group_manually(self):
+ ''' Test successful create flexGroup manually '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 20
+ set_module_args(data)
+ obj = self.get_volume_mock_object('job_info')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_create_flex_group_auto_provision(self):
+ ''' Test successful create flexGroup auto provision '''
+ data = self.mock_args('flexGroup_auto')
+ data['time_out'] = 20
+ set_module_args(data)
+ obj = self.get_volume_mock_object('job_info')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_delete_flex_group(self, get_volume):
+ ''' Test successful delete flexGroup '''
+ data = self.mock_args('flexGroup_manual')
+ data['state'] = 'absent'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'unix_permissions': '755',
+ 'is_online': True
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('job_info')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_resize_flex_group(self, get_volume):
+ ''' Test successful resize flexGroup '''
+ data = self.mock_args('flexGroup_manual')
+ data['size'] = 400
+ data['size_unit'] = 'mb'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'size': 20971520,
+ 'unix_permissions': '755',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('job_info')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.check_job_status')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_modify_unix_permissions_flex_group(self, get_volume, check_job_status):
+ ''' Test successful modify unix permissions flexGroup '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 20
+ data['unix_permissions'] = '---rw-r-xr-x'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'unix_permissions': '777',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ check_job_status.side_effect = [
+ None
+ ]
+ obj = self.get_volume_mock_object('success_modify_async')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ print(exc)
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_modify_unix_permissions_flex_group_0_time_out(self, get_volume):
+ ''' Test successful modify unix permissions flexGroup with time_out 0 '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 0
+ data['unix_permissions'] = '---rw-r-xr-x'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'unix_permissions': '777',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('success_modify_async')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_modify_unix_permissions_flex_group_0_missing_result(self, get_volume):
+ ''' Test modify unix permissions flexGroup with a missing result in the async response '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 0
+ data['unix_permissions'] = '---rw-r-xr-x'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'unix_permissions': '777',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('missing_element_modify_async')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.apply()
+ msg = "Unexpected error when modifying volume: result is:"
+ assert exc.value.args[0]['msg'].startswith(msg)
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.check_job_status')
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_error_modify_unix_permissions_flex_group(self, get_volume, check_job_status):
+ ''' Test error modify unix permissions flexGroup '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 20
+ data['unix_permissions'] = '---rw-r-xr-x'
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'unix_permissions': '777',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ check_job_status.side_effect = ['error']
+ obj = self.get_volume_mock_object('success_modify_async')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['msg'] == 'Error when modify volume: error'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_failure_modify_unix_permissions_flex_group(self, get_volume):
+ ''' Test failure modify unix permissions flexGroup '''
+ data = self.mock_args('flexGroup_manual')
+ data['unix_permissions'] = '---rw-r-xr-x'
+ data['time_out'] = 20
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexvol',
+ 'unix_permissions': '777',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('failure_modify_async')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying volume test_vol: modify error message'
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_offline_state_flex_group(self, get_volume):
+ ''' Test successful offline flexGroup state '''
+ data = self.mock_args('flexGroup_manual')
+ data['is_online'] = False
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'is_online': True,
+ 'junction_path': 'anything',
+ 'unix_permissions': '755',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('job_info')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_online_state_flex_group(self, get_volume):
+ ''' Test successful online flexGroup state '''
+ data = self.mock_args('flexGroup_manual')
+ set_module_args(data)
+ current = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'style_extended': 'flexgroup',
+ 'is_online': False,
+ 'junction_path': 'anything',
+ 'unix_permissions': '755',
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
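+ # presumably the canned ZAPI responses consumed in order by successive calls during apply()
+ # (bring online, mount, async modify); 'job_info' is reused where the exact shape does not matter.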
+ online = 'job_info' # not correct, but works
+ job = 'job_info'
+ success = 'success_modify_async'
+ mount = 'job_info' # not correct, but works
+ kind = [online, job, job, success, mount, job, job]
+ obj = self.get_volume_mock_object(kind)
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_check_job_status_error(self):
+ ''' Test check job status error '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 0
+ set_module_args(data)
+ obj = self.get_volume_mock_object('job_info', job_error='failure')
+ result = obj.check_job_status('123')
+ assert result == 'failure'
+
+ def test_check_job_status_time_out_is_0(self):
+ ''' Test check job status when time_out is 0 '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 0
+ set_module_args(data)
+ obj = self.get_volume_mock_object('job_info', job_error='time_out')
+ result = obj.check_job_status('123')
+ assert result == 'job completion exceeded expected timer of: 0 seconds'
+
+ def test_check_job_status_unexpected(self):
+ ''' Test check job status unexpected state '''
+ data = self.mock_args('flexGroup_manual')
+ data['time_out'] = 20
+ set_module_args(data)
+ obj = self.get_volume_mock_object('job_info', job_error='other')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.check_job_status('123')
+ assert exc.value.args[0]['failed']
+
+ def test_error_set_efficiency_policy(self):
+ data = self.mock_args()
+ data['efficiency_policy'] = 'test_policy'
+ set_module_args(data)
+ obj = self.get_volume_mock_object('zapi_error')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.set_efficiency_config()
+ assert exc.value.args[0]['msg'] == 'Error enable efficiency on volume test_vol: NetApp API failed. Reason - test:error'
+
+ def test_error_set_efficiency_policy_async(self):
+ data = self.mock_args()
+ data['efficiency_policy'] = 'test_policy'
+ set_module_args(data)
+ obj = self.get_volume_mock_object('zapi_error')
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj.set_efficiency_config_async()
+ assert exc.value.args[0]['msg'] == 'Error enable efficiency on volume test_vol: NetApp API failed. Reason - test:error'
+
+ def test_successful_modify_tiering_policy(self):
+ ''' Test successful modify tiering policy '''
+ data = self.mock_args()
+ data['tiering_policy'] = 'auto'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_vserver_dr_protection(self):
+ ''' Test successful modify vserver_dr_protection '''
+ data = self.mock_args()
+ data['vserver_dr_protection'] = 'protected'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_group_id(self):
+ ''' Test successful modify group_id '''
+ data = self.mock_args()
+ data['group_id'] = 1001
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_user_id(self):
+ ''' Test successful modify user_id '''
+ data = self.mock_args()
+ data['user_id'] = 101
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume')
+ def test_successful_modify_snapshot_auto_delete(self, get_volume):
+ ''' Test successful modify snapshot_auto_delete '''
+ data = {
+ 'snapshot_auto_delete': {'delete_order': 'oldest_first', 'destroy_list': 'lun_clone,vol_clone',
+ 'target_free_space': 20, 'prefix': 'test', 'commitment': 'try',
+ 'state': 'on', 'trigger': 'snap_reserve', 'defer_delete': 'scheduled'},
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': 'test_vol',
+ 'vserver': 'test_vserver',
+
+ }
+ set_module_args(data)
+ current = {
+ 'name': self.mock_vol['name'],
+ 'vserver': self.mock_vol['vserver'],
+ 'snapshot_auto_delete': {'delete_order': 'newest_first', 'destroy_list': 'lun_clone,vol_clone',
+ 'target_free_space': 30, 'prefix': 'test', 'commitment': 'try',
+ 'state': 'on', 'trigger': 'snap_reserve', 'defer_delete': 'scheduled'},
+ 'uuid': '1234'
+ }
+ get_volume.side_effect = [
+ current
+ ]
+ obj = self.get_volume_mock_object('volume')
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error_modify_snapshot_auto_delete(self):
+ data = {
+ 'snapshot_auto_delete': {'delete_order': 'oldest_first', 'destroy_list': 'lun_clone,vol_clone',
+ 'target_free_space': 20, 'prefix': 'test', 'commitment': 'try',
+ 'state': 'on', 'trigger': 'snap_reserve', 'defer_delete': 'scheduled'},
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': 'test_vol',
+ 'vserver': 'test_vserver',
+
+ }
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object('zapi_error').set_snapshot_auto_delete()
+ assert exc.value.args[0]['msg'] == 'Error setting snapshot auto delete options for volume test_vol: NetApp API failed. Reason - test:error'
+
+ def test_successful_volume_rehost(self):
+ data = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': 'test_vol',
+ 'vserver': 'dest_vserver',
+ 'from_vserver': 'source_vserver'
+ }
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error_volume_rehost(self):
+ data = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': 'test_vol',
+ 'vserver': 'dest_vserver',
+ 'from_vserver': 'source_vserver'
+ }
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object('zapi_error').rehost_volume()
+ assert exc.value.args[0]['msg'] == 'Error rehosting volume test_vol: NetApp API failed. Reason - test:error'
+
+ def test_successful_volume_restore(self):
+ data = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': 'test_vol',
+ 'vserver': 'test_vserver',
+ 'snapshot_restore': 'snapshot_copy'
+ }
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object('volume').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_error_volume_restore(self):
+ data = {
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'name': 'test_vol',
+ 'vserver': 'test_vserver',
+ 'snapshot_restore': 'snapshot_copy'
+ }
+ set_module_args(data)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object('zapi_error').snapshot_restore_volume()
+ assert exc.value.args[0]['msg'] == 'Error restoring volume test_vol: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py
new file mode 100644
index 00000000..653fddc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py
@@ -0,0 +1,243 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_volume_autosize '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_autosize \
+ import NetAppOntapVolumeAutosize as autosize_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+# REST API canned responses when mocking send_request
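+# each entry is used as a (status_code, response body, error message) tuple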
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_uuid': (200, {'records': [{'uuid': 'testuuid'}]}, None),
+ 'get_autosize': (200,
+ {'uuid': 'testuuid',
+ 'name': 'testname',
+ 'autosize': {"maximum": 10737418240,
+ "minimum": 22020096,
+ "grow_threshold": 99,
+ "shrink_threshold": 40,
+ "mode": "grow"
+ }
+ }, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'autosize':
+ xml = self.build_autosize_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_autosize_info(autosize_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'grow-threshold-percent': autosize_details['grow_threshold_percent'],
+ 'maximum-size': '10485760',
+ 'minimum-size': '21504',
+ 'increment_size': '10240',
+ 'mode': autosize_details['mode'],
+ 'shrink-threshold-percent': autosize_details['shrink_threshold_percent']
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_volume_autosize '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_autosize = {
+ 'grow_threshold_percent': 99,
+ 'maximum_size': '10g',
+ 'minimum_size': '21m',
+ 'increment_size': '10m',
+ 'mode': 'grow',
+ 'shrink_threshold_percent': 40,
+ 'vserver': 'test_vserver',
+ 'volume': 'test_volume'
+ }
+
+ def mock_args(self, rest=False):
+ if rest:
+ return {
+ 'vserver': self.mock_autosize['vserver'],
+ 'volume': self.mock_autosize['volume'],
+ 'grow_threshold_percent': self.mock_autosize['grow_threshold_percent'],
+ 'maximum_size': self.mock_autosize['maximum_size'],
+ 'minimum_size': self.mock_autosize['minimum_size'],
+ 'mode': self.mock_autosize['mode'],
+ 'shrink_threshold_percent': self.mock_autosize['shrink_threshold_percent'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+ else:
+ return {
+ 'vserver': self.mock_autosize['vserver'],
+ 'volume': self.mock_autosize['volume'],
+ 'grow_threshold_percent': self.mock_autosize['grow_threshold_percent'],
+ 'maximum_size': self.mock_autosize['maximum_size'],
+ 'minimum_size': self.mock_autosize['minimum_size'],
+ 'increment_size': self.mock_autosize['increment_size'],
+ 'mode': self.mock_autosize['mode'],
+ 'shrink_threshold_percent': self.mock_autosize['shrink_threshold_percent'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'use_rest': 'never'
+ }
+
+ def get_autosize_mock_object(self, cx_type='zapi', kind=None):
+ autosize_obj = autosize_module()
+ if cx_type == 'zapi':
+ if kind is None:
+ autosize_obj.server = MockONTAPConnection()
+ elif kind == 'autosize':
+ autosize_obj.server = MockONTAPConnection(kind='autosize', data=self.mock_autosize)
+ return autosize_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ autosize_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_idempotent_modify(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_autosize_mock_object('zapi', 'autosize').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successful_modify(self):
+ data = self.mock_args()
+ data['maximum_size'] = '11g'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_autosize_mock_object('zapi', 'autosize').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_reset(self):
+ data = {}
+ data['reset'] = True
+ data['hostname'] = 'test'
+ data['username'] = 'test_user'
+ data['password'] = 'test_pass!'
+ data['volume'] = 'test_vol'
+ data['vserver'] = 'test_vserver'
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_autosize_mock_object('zapi', 'autosize').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_autosize_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_modify(self, mock_request):
+ data = self.mock_args(rest=True)
+ data['maximum_size'] = '11g'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'],
+ SRR['get_autosize'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_autosize_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotent_modify(self, mock_request):
+ data = self.mock_args(rest=True)
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_uuid'],
+ SRR['get_autosize'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_autosize_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py
new file mode 100644
index 00000000..eb78e3fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py
@@ -0,0 +1,257 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for ONTAP Ansible module: na_ontap_volume_clone '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_clone \
+ import NetAppONTAPVolumeClone as my_module
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None):
+ ''' save arguments '''
+ self.type = kind
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'volume_clone':
+ xml = self.build_volume_clone_info()
+ elif self.type == 'volume_clone_split_in_progress':
+ xml = self.build_volume_clone_info_split_in_progress()
+ elif self.type == 'volume_clone_fail':
+ raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_volume_clone_info():
+ ''' build xml data for volume-clone-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'attributes': {'volume-clone-info': {'volume': 'ansible',
+ 'parent-volume': 'ansible'}}}
+ xml.translate_struct(data)
+ return xml
+
+ @staticmethod
+ def build_volume_clone_info_split_in_progress():
+ ''' build xml data for volume-clone-info whilst split in progress '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ data = {'attributes': {'volume-clone-info': {'volume': 'ansible',
+ 'parent-volume': 'ansible',
+ 'block-percentage-complete': 20,
+ 'blocks-scanned': 56676,
+ 'blocks-updated': 54588}}}
+ xml.translate_struct(data)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.vserver = MockONTAPConnection()
+ self.onbox = False
+
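+ # note: both branches below are currently identical; the onbox flag appears to be a hook for
+ # running these tests against a real system instead of the mock connection.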
+ def set_default_args(self):
+ if self.onbox:
+ hostname = '10.10.10.10'
+ username = 'username'
+ password = 'password'
+ vserver = 'ansible'
+ volume = 'ansible'
+ parent_volume = 'ansible'
+ split = None
+ else:
+ hostname = '10.10.10.10'
+ username = 'username'
+ password = 'password'
+ vserver = 'ansible'
+ volume = 'ansible'
+ parent_volume = 'ansible'
+ split = None
+ return dict({
+ 'hostname': hostname,
+ 'username': username,
+ 'password': password,
+ 'vserver': vserver,
+ 'volume': volume,
+ 'parent_volume': parent_volume,
+ 'split': split
+ })
+
+ def set_default_current(self):
+ return dict({
+ 'split': False
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' test required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_ensure_get_called(self):
+ ''' test get_volume_clone() for non-existent volume clone'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.vserver = self.vserver
+ assert my_obj.get_volume_clone() is None
+
+ def test_ensure_get_called_existing(self):
+ ''' test get_volume_clone() for existing volume clone'''
+ set_module_args(self.set_default_args())
+ my_obj = my_module()
+ my_obj.vserver = MockONTAPConnection(kind='volume_clone')
+ current = self.set_default_current()
+ assert my_obj.get_volume_clone() == current
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_clone.NetAppONTAPVolumeClone.create_volume_clone')
+ def test_successful_create(self, create_volume_clone):
+ ''' test creating volume_clone without split and testing idempotency '''
+ module_args = {
+ 'parent_vserver': 'abc',
+ 'parent_snapshot': 'abc',
+ 'volume_type': 'dp',
+ 'qos_policy_group_name': 'abc',
+ 'junction_path': 'abc',
+ 'uid': '1',
+ 'gid': '1'
+ }
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.vserver = self.vserver
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_volume_clone.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.vserver = MockONTAPConnection('volume_clone')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_clone.NetAppONTAPVolumeClone.create_volume_clone')
+ def test_successful_create_with_split(self, create_volume_clone):
+ ''' test creating volume_clone with split and testing idempotency '''
+ module_args = {
+ 'parent_snapshot': 'abc',
+ 'parent_vserver': 'abc',
+ 'volume_type': 'dp',
+ 'qos_policy_group_name': 'abc',
+ 'junction_path': 'abc',
+ 'uid': '1',
+ 'gid': '1'
+ }
+ module_args.update(self.set_default_args())
+ module_args['split'] = True
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.vserver = self.vserver
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+ create_volume_clone.assert_called_with()
+ # to reset na_helper from remembering the previous 'changed' value
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.vserver = MockONTAPConnection('volume_clone_split_in_progress')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_clone.NetAppONTAPVolumeClone.create_volume_clone')
+ def test_successful_create_with_split_in_progress(self, create_volume_clone):
+ ''' test creating volume_clone with split and split already in progress '''
+ module_args = {
+ 'parent_snapshot': 'abc',
+ 'parent_vserver': 'abc',
+ 'volume_type': 'dp',
+ 'qos_policy_group_name': 'abc',
+ 'junction_path': 'abc',
+ 'uid': '1',
+ 'gid': '1'
+ }
+ module_args.update(self.set_default_args())
+ module_args['split'] = True
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.vserver = MockONTAPConnection('volume_clone_split_in_progress')
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_if_all_methods_catch_exception(self):
+ ''' test if all methods catch exception '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ set_module_args(module_args)
+ my_obj = my_module()
+ if not self.onbox:
+ my_obj.vserver = MockONTAPConnection('volume_clone_fail')
+ my_obj.create_server = my_obj.vserver
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_volume_clone()
+ assert 'Error fetching volume clone information ' in exc.value.args[0]['msg']
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.create_volume_clone()
+ assert 'Error creating volume clone: ' in exc.value.args[0]['msg']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py
new file mode 100644
index 00000000..fe5016e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py
@@ -0,0 +1,333 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_volume (REST) '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume \
+ import NetAppOntapVolume as volume_module # module under test
+
+# needed for get and modify/delete as they still use ZAPI
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+# REST API canned responses when mocking send_request
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'svm_record': (200,
+ {'records': [{"uuid": "09e9fd5e-8ebd-11e9-b162-005056b39fe7",
+ "name": "test_svm",
+ "subtype": "default",
+ "language": "c.utf_8",
+ "aggregates": [{"name": "aggr_1",
+ "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"},
+ {"name": "aggr_2",
+ "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"}],
+ "comment": "new comment",
+ "ipspace": {"name": "ansible_ipspace",
+ "uuid": "2b760d31-8dfd-11e9-b162-005056b39fe7"},
+ "snapshot_policy": {"uuid": "3b611707-8dfd-11e9-b162-005056b39fe7",
+ "name": "old_snapshot_policy"},
+ "nfs": {"enabled": True},
+ "cifs": {"enabled": False},
+ "iscsi": {"enabled": False},
+ "fcp": {"enabled": False},
+ "nvme": {"enabled": False}}]}, None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None, get_volume=None):
+ ''' save arguments '''
+ self.type = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+ self.get_volume = get_volume
+ self.zapis = list()
+
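+ # invoke_successfully records each ZAPI call name in self.zapis so tests can assert
+ # which ZAPIs were (or were not) issued alongside the REST calls.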
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ zapi = xml.get_name()
+ self.zapis.append(zapi)
+ request = xml.to_string().decode('utf-8')
+ if self.type == 'error':
+ raise OSError('unexpected call to %s' % self.params)
+ print('request:', request)
+ if request.startswith('<volume-get-iter>'):
+ what = None
+ if self.get_volume:
+ what = self.get_volume.pop(0)
+ if what is None:
+ xml = self.build_empty_response()
+ else:
+ xml = self.build_get_response(what)
+ self.xml_out = xml
+ print('response:', xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_response(data):
+ ''' build an xml response from a dict of attributes '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ xml.translate_struct(data)
+ return xml
+
+ def build_empty_response(self):
+ data = {'num-records': '0'}
+ return self.build_response(data)
+
+ def build_get_response(self, name):
+ ''' build xml data for a volume-get-iter response '''
+ if name is None:
+ return self.build_empty_response()
+ data = {'num-records': 1,
+ 'attributes-list': [{
+ 'volume-attributes': {
+ 'volume-id-attributes': {
+ 'name': name,
+ 'instance-uuid': '123'
+ },
+ 'volume-performance-attributes': {
+ 'is-atime-update-enabled': 'true'
+ },
+ 'volume-security-attributes': {
+ 'volume-security-unix-attributes': {
+ 'permissions': 777
+ }
+ },
+ 'volume-snapshot-attributes': {
+ 'snapshot-policy': 'default'
+ },
+ 'volume-snapshot-autodelete-attributes': {
+ 'is-autodelete-enabled': 'true'
+ },
+ 'volume-space-attributes': {
+ 'size': 10737418240 # 10 GB
+ },
+ 'volume-state-attributes': {
+ 'state': 'online'
+ },
+ }
+ }]}
+ return self.build_response(data)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ # self.server = MockONTAPConnection()
+ self.mock_vserver = {
+ 'name': 'test_svm',
+ 'root_volume': 'ansible_vol',
+ 'root_volume_aggregate': 'ansible_aggr',
+ 'aggr_list': 'aggr_1,aggr_2',
+ 'ipspace': 'ansible_ipspace',
+ 'subtype': 'default',
+ 'language': 'c.utf_8',
+ 'snapshot_policy': 'old_snapshot_policy',
+ 'comment': 'new comment'
+ }
+
+ @staticmethod
+ def mock_args():
+ return {'name': 'test_volume',
+ 'vserver': 'ansibleSVM',
+ 'nas_application_template': dict(
+ tiering=None
+ ),
+ # 'aggregate_name': 'whatever', # not used for create when using REST application/applications
+ 'size': 10,
+ 'size_unit': 'gb',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'}
+
+ def get_volume_mock_object(self, **kwargs):
+ volume_obj = volume_module()
+ netapp_utils.ems_log_event = Mock(return_value=None)
+ volume_obj.server = MockONTAPConnection(**kwargs)
+ volume_obj.cluster = MockONTAPConnection(kind='error', data='cluster ZAPI.')
+ return volume_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ volume_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_fail_if_aggr_is_set(self, mock_request):
+ data = dict(self.mock_args())
+ data['aggregate_name'] = 'should_fail'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object().apply()
+ error = 'Conflict: aggregate_name is not supported when application template is enabled. Found: aggregate_name: should_fail'
+ assert exc.value.args[0]['msg'] == error
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_missing_size(self, mock_request):
+ data = dict(self.mock_args())
+ data.pop('size')
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object().apply()
+ error = 'Error: "size" is required to create nas application.'
+ assert exc.value.args[0]['msg'] == error
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_mismatched_tiering_policies(self, mock_request):
+ data = dict(self.mock_args())
+ data['tiering_policy'] = 'none'
+ data['nas_application_template'] = dict(
+ tiering=dict(policy='auto')
+ )
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object().apply()
+ error = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.'
+ error += ' Found "none" and "auto".'
+ assert exc.value.args[0]['msg'] == error
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = dict(self.mock_args())
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['generic_error'], # POST application/applications
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_volume_mock_object().apply()
+ assert exc.value.args[0]['msg'] == 'Error: calling: /application/applications: got %s' % SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_created(self, mock_request):
+ data = dict(self.mock_args())
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['empty_good'], # POST application/applications
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ data = dict(self.mock_args())
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_volume_mock_object(get_volume=['test']).apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_created_with_modify(self, mock_request):
+ ''' since language is not supported in application, the module is expected to:
+ 1. create the volume using application REST API
+ 2. immediately modify the volume to update options which are not available in the nas template.
+ '''
+ data = dict(self.mock_args())
+ data['language'] = 'fr' # TODO: apparently language is not supported for modify
+ data['unix_permissions'] = '---rw-rx-r--'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['empty_good'], # POST application/applications
+ SRR['end_of_sequence']
+ ]
+ my_volume = self.get_volume_mock_object(get_volume=[None, 'test'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_volume.apply()
+ assert exc.value.args[0]['changed']
+ print(exc.value.args[0])
+ assert 'unix_permissions' in exc.value.args[0]['modify_after_create']
+ assert 'language' not in exc.value.args[0]['modify_after_create'] # eh!
+ assert 'volume-modify-iter' in my_volume.server.zapis
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_resized(self, mock_request):
+ ''' make sure resize uses the REST API when sizing_method is present
+ '''
+ data = dict(self.mock_args())
+ data['sizing_method'] = 'add_new_resources'
+ data['size'] = 20737418240
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['empty_good'], # PATCH application/applications
+ SRR['end_of_sequence']
+ ]
+ my_volume = self.get_volume_mock_object(get_volume=['test'])
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_volume.apply()
+ assert exc.value.args[0]['changed']
+ print(exc.value.args[0])
+ assert 'volume-size' not in my_volume.server.zapis
+ print(mock_request.call_args)
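+ # 22266633286068469760 is 20737418240 (size) * 1024**3 (size_unit 'gb'); the uuid '123' in the
+ # PATCH URL comes from 'instance-uuid' in the mocked volume-get-iter response.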
+ mock_request.assert_called_with('PATCH', '/storage/volumes/123', {'sizing_method': 'add_new_resources'}, json={'size': 22266633286068469760})
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py
new file mode 100644
index 00000000..45ec4b40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py
@@ -0,0 +1,166 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for Ansible module: na_ontap_volume_snaplock """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_snaplock \
+ import NetAppOntapVolumeSnaplock as snaplock_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'snaplock':
+ xml = self.build_snaplock_info(self.params)
+ elif self.type == 'zapi_error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_snaplock_info(data):
+ ''' build xml data for snaplock-attrs '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'snaplock-attrs': {
+ 'snaplock-attrs-info': {
+ 'autocommit-period': data['autocommit_period'],
+ 'default-retention-period': data['default_retention_period'],
+ 'maximum-retention-period': data['maximum_retention_period'],
+ 'minimum-retention-period': data['minimum_retention_period'],
+ 'is-volume-append-mode-enabled': data['is_volume_append_mode_enabled']
+ }
+ }}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_snaplock = {
+ 'autocommit_period': '10days',
+ 'default_retention_period': '1years',
+ 'maximum_retention_period': '2years',
+ 'minimum_retention_period': '6months',
+ 'is_volume_append_mode_enabled': 'false'
+ }
+
+ def mock_args(self):
+ return {
+ 'name': 'test_volume',
+ 'autocommit_period': self.mock_snaplock['autocommit_period'],
+ 'default_retention_period': self.mock_snaplock['default_retention_period'],
+ 'maximum_retention_period': self.mock_snaplock['maximum_retention_period'],
+ 'minimum_retention_period': self.mock_snaplock['minimum_retention_period'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'vserver': 'test_vserver'
+ }
+
+ def get_snaplock_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_volume_snaplock object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_volume_snaplock object
+ """
+ snaplock_obj = snaplock_module()
+ netapp_utils.ems_log_event = Mock(return_value=None)
+ if kind is None:
+ snaplock_obj.server = MockONTAPConnection()
+ else:
+ snaplock_obj.server = MockONTAPConnection(kind=kind, data=self.mock_snaplock)
+ return snaplock_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ snaplock_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_existing_snaplock(self):
+ set_module_args(self.mock_args())
+ result = self.get_snaplock_mock_object(kind='snaplock').get_volume_snaplock_attrs()
+ assert result['autocommit_period'] == self.mock_snaplock['autocommit_period']
+ assert result['default_retention_period'] == self.mock_snaplock['default_retention_period']
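+ # the ZAPI string 'false' from the mocked response is expected back as a Python boolean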
+ assert result['is_volume_append_mode_enabled'] is False
+ assert result['maximum_retention_period'] == self.mock_snaplock['maximum_retention_period']
+
+ def test_modify_snaplock(self):
+ data = self.mock_args()
+ data['maximum_retention_period'] = '5years'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_snaplock_mock_object('snaplock').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_snaplock.NetAppOntapVolumeSnaplock.get_volume_snaplock_attrs')
+ def test_modify_snaplock_error(self, get_volume_snaplock_attrs):
+ data = self.mock_args()
+ data['maximum_retention_period'] = '5years'
+ set_module_args(data)
+ get_volume_snaplock_attrs.side_effect = [self.mock_snaplock]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_snaplock_mock_object('zapi_error').apply()
+ assert exc.value.args[0]['msg'] == 'Error setting snaplock attributes for volume test_volume : NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py
new file mode 100644
index 00000000..6ea6893c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py
@@ -0,0 +1,234 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_vscan '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan \
+ import NetAppOntapVscan as vscan_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required"
+
+
+# REST API canned responses when mocking send_request
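+# Each canned response is a (status_code, json_body, error) tuple; e.g. SRR['generic_error'][2] is used below as the expected error message.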
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'enabled': (200, {'records': [{'enabled': True, 'svm': {'uuid': 'testuuid'}}]}, None),
+ 'disabled': (200, {'records': [{'enabled': False, 'svm': {'uuid': 'testuuid'}}]}, None),
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'enable':
+ xml = self.build_vscan_status_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vscan_status_info(status):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'num-records': 1,
+ 'attributes-list': {'vscan-status-info': {'is-vscan-enabled': status}}}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_vscan '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def mock_args(self):
+ return {
+ 'enable': False,
+ 'vserver': 'vserver',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_vscan_mock_object(self, cx_type='zapi', kind=None, status=None):
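+ """
+ Helper method to return an na_ontap_vscan object
+ :param kind: passes this param to MockONTAPConnection() for ZAPI tests
+ :return: na_ontap_vscan object
+ """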
+ vscan_obj = vscan_module()
+ if cx_type == 'zapi':
+ if kind is None:
+ vscan_obj.server = MockONTAPConnection()
+ else:
+ vscan_obj.server = MockONTAPConnection(kind=kind, data=status)
+ # For rest, mocking is achieved through side_effect
+ return vscan_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ vscan_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_successfully_enable(self):
+ data = self.mock_args()
+ data['enable'] = True
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object('zapi', 'enable', 'false').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_idempotently_enable(self):
+ data = self.mock_args()
+ data['enable'] = True
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object('zapi', 'enable', 'true').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_disable(self):
+ data = self.mock_args()
+ data['enable'] = False
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object('zapi', 'enable', 'true').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_idempotently_disable(self):
+ data = self.mock_args()
+ data['enable'] = False
+ data['use_rest'] = 'never'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object('zapi', 'enable', 'false').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_error(self, mock_request):
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_vscan_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['msg'] == SRR['generic_error'][2]
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_enable(self, mock_request):
+ data = self.mock_args()
+ data['enable'] = True
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['disabled'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotently_enable(self, mock_request):
+ data = self.mock_args()
+ data['enable'] = True
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['enabled'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successfully_disable(self, mock_request):
+ data = self.mock_args()
+ data['enable'] = False
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['enabled'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object(cx_type='rest').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_idempotently_disable(self, mock_request):
+ data = self.mock_args()
+ data['enable'] = False
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['disabled'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vscan_mock_object(cx_type='rest').apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py
new file mode 100644
index 00000000..c595e73e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py
@@ -0,0 +1,159 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_vscan_on_access_policy '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_on_access_policy \
+ import NetAppOntapVscanOnAccessPolicy as policy_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required"
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'policy':
+ xml = self.build_access_policy_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_access_policy_info(policy_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {'num-records': 1,
+ 'attributes-list': {'vscan-on-access-policy-info': {'policy-name': policy_details['policy_name']}}}
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_vscan_on_access_policy '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_access_policy = {
+ 'state': 'present',
+ 'vserver': 'test_vserver',
+ 'policy_name': 'test_carchi',
+ 'max_file_size': 2147483648 + 1 # 2GB + 1
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_access_policy['state'],
+ 'vserver': self.mock_access_policy['vserver'],
+ 'policy_name': self.mock_access_policy['policy_name'],
+ 'max_file_size': self.mock_access_policy['max_file_size'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_policy_mock_object(self, kind=None):
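+ """
+ Helper method to return an na_ontap_vscan_on_access_policy object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_vscan_on_access_policy object
+ """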
+ policy_obj = policy_module()
+ if kind is None:
+ policy_obj.server = MockONTAPConnection()
+ else:
+ policy_obj.server = MockONTAPConnection(kind='policy', data=self.mock_access_policy)
+ return policy_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ policy_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_policy(self):
+ set_module_args(self.mock_args())
+ result = self.get_policy_mock_object().exists_access_policy()
+ assert not result
+
+ def test_get_existing_policy(self):
+ set_module_args(self.mock_args())
+ result = self.get_policy_mock_object('policy').exists_access_policy()
+ assert result
+
+ def test_successfully_create(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_mock_object('policy').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_policy_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py
new file mode 100644
index 00000000..a39e3732
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py
@@ -0,0 +1,168 @@
+''' unit tests for Ansible module: na_ontap_vscan_on_demand_task '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_on_demand_task \
+ import NetAppOntapVscanOnDemandTask as onDemand_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'task':
+ xml = self.build_onDemand_pool_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_onDemand_pool_info(onDemand_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'vscan-on-demand-task-info': {
+ 'task-name': onDemand_details['task_name'],
+ 'report-directory': onDemand_details['report_directory'],
+ 'scan-paths': {
+ 'string': onDemand_details['scan_paths']
+ }
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_vscan_on_demand_task '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_onDemand = {
+ 'state': 'present',
+ 'vserver': 'test_vserver',
+ 'report_directory': '/',
+ 'task_name': '/',
+ 'scan_paths': '/'
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_onDemand['state'],
+ 'vserver': self.mock_onDemand['vserver'],
+ 'report_directory': self.mock_onDemand['report_directory'],
+ 'task_name': self.mock_onDemand['task_name'],
+ 'scan_paths': self.mock_onDemand['scan_paths'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_demand_mock_object(self, kind=None):
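+ """
+ Helper method to return an na_ontap_vscan_on_demand_task object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_vscan_on_demand_task object
+ """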
+ scanner_obj = onDemand_module()
+ scanner_obj.asup_log_for_cserver = Mock(return_value=None)
+ if kind is None:
+ scanner_obj.server = MockONTAPConnection()
+ else:
+ scanner_obj.server = MockONTAPConnection(kind='task', data=self.mock_onDemand)
+ return scanner_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ onDemand_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_demand_task(self):
+ set_module_args(self.mock_args())
+ result = self.get_demand_mock_object().get_demand_task()
+ assert not result
+
+ def test_get_existing_demand_task(self):
+ set_module_args(self.mock_args())
+ result = self.get_demand_mock_object('task').get_demand_task()
+ assert result
+
+ def test_successfully_create(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_demand_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_demand_mock_object('task').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_demand_mock_object('task').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_demand_mock_object().apply()
+ assert not exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py
new file mode 100644
index 00000000..a1aae605
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py
@@ -0,0 +1,188 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_vscan_scanner_pool '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_scanner_pool \
+ import NetAppOntapVscanScannerPool as scanner_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.params = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'scanner':
+ xml = self.build_scanner_pool_info(self.params)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_scanner_pool_info(scanner_details):
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'vscan-scanner-pool-info': {
+ 'scanner-pool': scanner_details['scanner_pool'],
+ 'scanner-policy': scanner_details['scanner_policy'],
+ 'hostnames': [
+ {'hostname': scanner_details['hostnames'][0]},
+ {'hostname': scanner_details['hostnames'][1]}
+ ],
+ 'privileged-users': [
+ {"privileged-user": scanner_details['privileged_users'][0]},
+ {"privileged-user": scanner_details['privileged_users'][1]}
+ ]
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_vscan_scanner_pool '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_scanner = {
+ 'state': 'present',
+ 'scanner_pool': 'test_pool',
+ 'vserver': 'test_vserver',
+ 'hostnames': ['host1', 'host2'],
+ 'privileged_users': ['domain\\admin', 'domain\\carchi8py'],
+ 'scanner_policy': 'primary'
+ }
+
+ def mock_args(self):
+ return {
+ 'state': self.mock_scanner['state'],
+ 'scanner_pool': self.mock_scanner['scanner_pool'],
+ 'vserver': self.mock_scanner['vserver'],
+ 'hostnames': self.mock_scanner['hostnames'],
+ 'privileged_users': self.mock_scanner['privileged_users'],
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'scanner_policy': self.mock_scanner['scanner_policy']
+ }
+
+ def get_scanner_mock_object(self, kind=None):
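+ """
+ Helper method to return an na_ontap_vscan_scanner_pool object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_vscan_scanner_pool object
+ """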
+ scanner_obj = scanner_module()
+ scanner_obj.asup_log_for_cserver = Mock(return_value=None)
+ if kind is None:
+ scanner_obj.server = MockONTAPConnection()
+ else:
+ scanner_obj.server = MockONTAPConnection(kind='scanner', data=self.mock_scanner)
+ return scanner_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ scanner_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_get_nonexistent_scanner(self):
+ ''' Test if get_scanner_pool returns None for a non-existent scanner pool '''
+ set_module_args(self.mock_args())
+ result = self.get_scanner_mock_object().get_scanner_pool()
+ assert not result
+
+ def test_get_existing_scanner(self):
+ ''' Test if get_scanner_pool returns details for an existing scanner pool '''
+ set_module_args(self.mock_args())
+ result = self.get_scanner_mock_object('scanner').get_scanner_pool()
+ assert result
+
+ def test_successfully_create(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_scanner_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ def test_create_idempotency(self):
+ set_module_args(self.mock_args())
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_scanner_mock_object('scanner').apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_delete(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_scanner_mock_object('scanner').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_delete_idempotency(self):
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_scanner_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_successfully_modify(self):
+ data = self.mock_args()
+ data['hostnames'] = "host1"
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_scanner_mock_object('scanner').apply()
+ assert exc.value.args[0]['changed']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py
new file mode 100644
index 00000000..ccc48b24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py
@@ -0,0 +1,166 @@
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_vserver_cifs_security '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_cifs_security \
+ import NetAppONTAPCifsSecurity as cifs_security_module # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.type = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.type == 'cifs_security':
+ xml = self.build_security_info(self.data)
+ if self.type == 'error':
+ error = netapp_utils.zapi.NaApiError('test', 'error')
+ raise error
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_security_info(cifs_security_details):
+ ''' build xml data for cifs-security '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'cifs-security': {
+ 'is-aes-encryption-enabled': str(cifs_security_details['is_aes_encryption_enabled']).lower(),
+ 'lm-compatibility-level': cifs_security_details['lm_compatibility_level'],
+ 'kerberos-clock-skew': str(cifs_security_details['kerberos_clock_skew'])
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_cifs_security = {
+ 'is_aes_encryption_enabled': True,
+ 'lm_compatibility_level': 'krb',
+ 'kerberos_clock_skew': 10
+ }
+
+ def mock_args(self):
+ return {
+ 'is_aes_encryption_enabled': self.mock_cifs_security['is_aes_encryption_enabled'],
+ 'lm_compatibility_level': self.mock_cifs_security['lm_compatibility_level'],
+ 'kerberos_clock_skew': self.mock_cifs_security['kerberos_clock_skew'],
+ 'vserver': 'ansible',
+ 'hostname': 'test',
+ 'username': 'test_user',
+ 'password': 'test_pass!',
+ 'https': 'False'
+ }
+
+ def get_cifs_security_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_vserver_cifs_security object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_vserver_cifs_security object
+ """
+ obj = cifs_security_module()
+ obj.asup_log_for_cserver = Mock(return_value=None)
+ obj.server = Mock()
+ obj.server.invoke_successfully = Mock()
+ if kind is None:
+ obj.server = MockONTAPConnection()
+ else:
+ obj.server = MockONTAPConnection(kind=kind, data=self.mock_cifs_security)
+ return obj
+
+ def test_successful_modify_int_option(self):
+ ''' Test successful modify kerberos_clock_skew '''
+ data = self.mock_args()
+ data['kerberos_clock_skew'] = 15
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cifs_security_mock_object('cifs_security').apply()
+ assert exc.value.args[0]['changed']
+
+ def test_successful_modify_bool_option(self):
+ ''' Test successful modify is_aes_encryption_enabled '''
+ data = self.mock_args()
+ data['is_aes_encryption_enabled'] = False
+ set_module_args(data)
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_cifs_security_mock_object('cifs_security').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_cifs_security.NetAppONTAPCifsSecurity.cifs_security_get_iter')
+ def test_modify_error(self, get_cifs_security):
+ ''' Test modify error '''
+ data = self.mock_args()
+ set_module_args(data)
+ current = {
+ 'is_aes_encryption_enabled': False
+ }
+ get_cifs_security.side_effect = [
+ current
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_cifs_security_mock_object('error').apply()
+ assert exc.value.args[0]['msg'] == 'Error modifying cifs security on ansible: NetApp API failed. Reason - test:error'
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py
new file mode 100644
index 00000000..01523aac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py
@@ -0,0 +1,250 @@
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_vserver_peer '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer \
+ import NetAppONTAPVserverPeer as vserver_peer # module under test
+
+if not netapp_utils.has_netapp_lib():
+ pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockONTAPConnection(object):
+ ''' mock server connection to ONTAP host '''
+
+ def __init__(self, kind=None, data=None):
+ ''' save arguments '''
+ self.kind = kind
+ self.data = data
+ self.xml_in = None
+ self.xml_out = None
+
+ def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
+ ''' mock invoke_successfully returning xml data '''
+ self.xml_in = xml
+ if self.kind == 'vserver_peer':
+ xml = self.build_vserver_peer_info(self.data)
+ if self.kind == 'cluster':
+ xml = self.build_cluster_info(self.data)
+ self.xml_out = xml
+ return xml
+
+ @staticmethod
+ def build_vserver_peer_info(vserver):
+ ''' build xml data for vserver-peer-info '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'num-records': 1,
+ 'attributes-list': {
+ 'vserver-peer-info': {
+ 'peer-vserver': vserver['peer_vserver'],
+ 'vserver': vserver['vserver'],
+ 'peer-state': 'peered'
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ print(xml.to_string())
+ return xml
+
+ @staticmethod
+ def build_cluster_info(vserver):
+ ''' build xml data for cluster-identity-get '''
+ xml = netapp_utils.zapi.NaElement('xml')
+ attributes = {
+ 'attributes': {
+ 'cluster-identity-info': {
+ 'cluster-name': vserver['peer_cluster']
+ }
+ }
+ }
+ xml.translate_struct(attributes)
+ print(xml.to_string())
+ return xml
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_vserver_peer = {
+ 'vserver': 'test',
+ 'peer_vserver': 'test_peer',
+ 'peer_cluster': 'test_cluster_peer',
+ 'applications': ['snapmirror'],
+ 'hostname': 'hostname',
+ 'dest_hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+
+ }
+
+ def get_vserver_peer_mock_object(self, kind=None):
+ """
+ Helper method to return an na_ontap_vserver_peer object
+ :param kind: passes this param to MockONTAPConnection()
+ :return: na_ontap_vserver_peer object
+ """
+ vserver_peer_obj = vserver_peer()
+ vserver_peer_obj.asup_log_for_cserver = Mock(return_value=None)
+ if kind is None:
+ vserver_peer_obj.server = MockONTAPConnection()
+ vserver_peer_obj.dest_server = MockONTAPConnection()
+ else:
+ vserver_peer_obj.server = MockONTAPConnection(kind=kind, data=self.mock_vserver_peer)
+ vserver_peer_obj.dest_server = MockONTAPConnection(kind=kind, data=self.mock_vserver_peer)
+ return vserver_peer_obj
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ vserver_peer()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer.NetAppONTAPVserverPeer.vserver_peer_get')
+ def test_successful_create(self, vserver_peer_get):
+ ''' Test successful create '''
+ data = self.mock_vserver_peer
+ data['dest_hostname'] = 'test_destination'
+ set_module_args(self.mock_vserver_peer)
+ vserver_peer_get.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_peer_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer.NetAppONTAPVserverPeer.vserver_peer_get')
+ def test_create_idempotency(self, vserver_peer_get):
+ ''' Test create idempotency '''
+ data = self.mock_vserver_peer
+ data['dest_hostname'] = 'test_destination'
+ set_module_args(self.mock_vserver_peer)
+ current = {
+ 'vserver': 'test',
+ 'peer_vserver': self.mock_vserver_peer['peer_vserver'],
+ 'peer_cluster': self.mock_vserver_peer['peer_cluster']
+ }
+ vserver_peer_get.return_value = current
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_peer_mock_object('vserver_peer').apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer.NetAppONTAPVserverPeer.vserver_peer_get')
+ def test_successful_delete(self, vserver_peer_get):
+ ''' Test successful delete peer '''
+ data = self.mock_vserver_peer
+ data['state'] = 'absent'
+ set_module_args(data)
+ vserver_peer_get.return_value = Mock()
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_peer_mock_object('vserver_peer').apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer.NetAppONTAPVserverPeer.vserver_peer_get')
+ def test_delete_idempotency(self, vserver_peer_get):
+ ''' Test delete idempotency '''
+ data = self.mock_vserver_peer
+ data['state'] = 'absent'
+ set_module_args(data)
+ vserver_peer_get.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_peer_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ def test_helper_vserver_peer_get_iter(self):
+ ''' Test vserver_peer_get_iter method '''
+ set_module_args(self.mock_vserver_peer)
+ obj = self.get_vserver_peer_mock_object('vserver_peer')
+ result = obj.vserver_peer_get_iter()
+ print(result.to_string(pretty=True))
+ assert result['query'] is not None
+ assert result['query']['vserver-peer-info'] is not None
+ info = result['query']['vserver-peer-info']
+ assert info['vserver'] == self.mock_vserver_peer['vserver']
+ assert info['peer-vserver'] == self.mock_vserver_peer['peer_vserver']
+
+ def test_get_packet(self):
+ ''' Test vserver_peer_get method '''
+ set_module_args(self.mock_vserver_peer)
+ obj = self.get_vserver_peer_mock_object('vserver_peer')
+ result = obj.vserver_peer_get()
+ assert 'vserver' in result.keys()
+ assert 'peer_vserver' in result.keys()
+ assert 'peer_state' in result.keys()
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer.NetAppONTAPVserverPeer.vserver_peer_get')
+ def test_error_on_missing_params_create(self, vserver_peer_get):
+ ''' Test error thrown from vserver_peer_create '''
+ data = self.mock_vserver_peer
+ del data['applications']
+ set_module_args(data)
+ vserver_peer_get.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_vserver_peer_mock_object().apply()
+ assert exc.value.args[0]['msg'] == "applications parameter is missing"
+
+ @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer.NetAppONTAPVserverPeer.get_peer_cluster_name')
+ def test_get_peer_cluster_called(self, cluster_peer_get):
+ ''' Test get_peer_cluster_name called if peer_cluster is missing '''
+ data = self.mock_vserver_peer
+ del data['peer_cluster']
+ set_module_args(data)
+ cluster_peer_get.return_value = 'something'
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_vserver_peer_mock_object().apply()
+ assert cluster_peer_get.call_count == 1
+
+ def test_get_peer_cluster_packet(self):
+ ''' Test get_peer_cluster_name xml packet '''
+ data = self.mock_vserver_peer
+ set_module_args(data)
+ obj = self.get_vserver_peer_mock_object('cluster')
+ result = obj.get_peer_cluster_name()
+ assert result == data['peer_cluster']
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py
new file mode 100644
index 00000000..3dc513ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py
@@ -0,0 +1,224 @@
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Ansible module: na_ontap_wwpn_alias '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.ontap.tests.unit.compat import unittest
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.ontap.plugins.modules.na_ontap_wwpn_alias \
+ import NetAppOntapWwpnAlias as alias_module # module under test
+
+# REST API canned responses when mocking send_request
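+# Each canned response is a (status_code, json_body, error) tuple; the side_effect lists below assume the module first probes for REST, then resolves the SVM UUID, looks up the alias, and finally issues the write call.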
+SRR = {
+ # common responses
+ 'is_rest': (200, {}, None),
+ 'is_zapi': (400, {}, "Unreachable"),
+ 'empty_good': (200, {}, None),
+ 'end_of_sequence': (500, None, "Unexpected call to send_request"),
+ 'generic_error': (400, None, "Expected error"),
+ # module specific responses
+ 'get_alias': (
+ 200,
+ {"records": [{
+ "svm": {
+ "uuid": "uuid",
+ "name": "svm"},
+ "alias": "host1",
+ "wwpn": "01:02:03:04:0a:0b:0c:0d"}],
+ "num_records": 1}, None),
+ 'get_svm_uuid': (
+ 200,
+ {"records": [{
+ "uuid": "test_uuid"
+ }]}, None),
+ "no_record": (
+ 200,
+ {"num_records": 0},
+ None)
+}
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' Unit tests for na_ontap_wwpn_alias '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.mock_alias = {
+ 'name': 'host1',
+ 'vserver': 'test_vserver'
+ }
+
+ def mock_args(self):
+ return {
+ 'vserver': self.mock_alias['vserver'],
+ 'name': self.mock_alias['name'],
+ "wwpn": "01:02:03:04:0a:0b:0c:0d",
+ 'hostname': 'test_host',
+ 'username': 'test_user',
+ 'password': 'test_pass!'
+ }
+
+ def get_alias_mock_object(self):
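+ """
+ Helper method to return an na_ontap_wwpn_alias object; REST calls are mocked via send_request side_effect
+ :return: na_ontap_wwpn_alias object
+ """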
+ alias_obj = alias_module()
+ return alias_obj
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_successful_create(self, mock_request):
+ '''Test successful rest create'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_idempotency(self, mock_request):
+ '''Test rest create idempotency'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['get_alias'],
+ SRR['no_record'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_create_error(self, mock_request):
+ '''Test rest create error'''
+ data = self.mock_args()
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['no_record'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['msg'] == "Error on creating wwpn alias: Expected error."
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_modify(self, mock_request):
+ '''Test successful rest modify'''
+ data = self.mock_args()
+ data['wwpn'] = "01:02:03:04:0a:0b:0c:0e"
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['get_alias'],
+ SRR['empty_good'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_modify_error_delete(self, mock_request):
+ '''Test rest modify error when deleting the existing alias'''
+ data = self.mock_args()
+ data['wwpn'] = "01:02:03:04:0a:0b:0c:0e"
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['get_alias'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['msg'] == "Error on modifying wwpn alias when trying to delete alias: Expected error."
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_modify_error_create(self, mock_request):
+ '''Test rest modify error when re-creating the alias'''
+ data = self.mock_args()
+ data['wwpn'] = "01:02:03:04:0a:0b:0c:0e"
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['get_alias'],
+ SRR['empty_good'],
+ SRR['generic_error'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['msg'] == "Error on modifying wwpn alias when trying to re-create alias: Expected error."
+
+ @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
+ def test_rest_delete_error(self, mock_request):
+ '''Test rest delete error'''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ mock_request.side_effect = [
+ SRR['is_rest'],
+ SRR['get_svm_uuid'],
+ SRR['get_alias'],
+ SRR['generic_error'],
+ SRR['empty_good'],
+ SRR['end_of_sequence']
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.get_alias_mock_object().apply()
+ assert exc.value.args[0]['msg'] == "Error on deleting wwpn alias: Expected error."
diff --git a/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/requirements.txt
new file mode 100644
index 00000000..c7897ee3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp/ontap/tests/unit/requirements.txt
@@ -0,0 +1,3 @@
+netapp-lib
+solidfire-sdk-python
+unittest2 ; python_version < '2.7'